block: return ELEVATOR_DISCARD_MERGE if possible
When merging a bio into a request, if both are discard I/O and the queue
supports multi-range discard, we need to return ELEVATOR_DISCARD_MERGE,
because neither the block core nor the related drivers (nvme, virtio-blk)
handle a mixed discard merge (a traditional I/O merge combined with a
discard merge) well.
Fix the issue by returning ELEVATOR_DISCARD_MERGE in this situation,
so both blk-mq and the drivers only need to handle multi-range discard.
Reported-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Fixes: 2705dfb209 ("block: fix discard request merge")
Link: https://lore.kernel.org/r/20210729034226.1591070-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 866663b7b5
parent a11d7fc2d0
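
In essence, the patch makes every elevator merge decision check discard-mergability first, via the blk_discard_mergable() helper that the diff below moves into blkdev.h. As an illustration only, here is a minimal standalone C sketch of that decision; mock_queue, mock_request, discard_mergable and classify_merge are simplified stand-ins for the kernel's types and helpers, not kernel code:

/* Standalone sketch (mock types, not kernel code): a request is only
 * eligible for a multi-range discard merge when it is a discard and the
 * queue advertises more than one discard segment; otherwise it falls
 * back to a traditional merge. */
#include <stdbool.h>
#include <stdio.h>

enum elv_merge { ELEVATOR_NO_MERGE, ELEVATOR_BACK_MERGE, ELEVATOR_DISCARD_MERGE };
enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD };

struct mock_queue { unsigned int max_discard_segments; };
struct mock_request { enum req_op op; struct mock_queue *q; };

/* Mirrors the logic of the blk_discard_mergable() helper in the diff. */
static bool discard_mergable(const struct mock_request *req)
{
	return req->op == REQ_OP_DISCARD && req->q->max_discard_segments > 1;
}

/* What the patched merge paths now do first: prefer a discard merge over
 * a traditional merge, so drivers never see mixed merges. */
static enum elv_merge classify_merge(const struct mock_request *req)
{
	if (discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	return ELEVATOR_BACK_MERGE;
}

int main(void)
{
	struct mock_queue nvme_like = { .max_discard_segments = 256 };
	struct mock_request rq = { .op = REQ_OP_DISCARD, .q = &nvme_like };

	printf("merge type: %s\n",
	       classify_merge(&rq) == ELEVATOR_DISCARD_MERGE ?
	       "discard merge" : "back merge");
	return 0;
}
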
block/bfq-iosched.c
@@ -2361,6 +2361,9 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
 	__rq = bfq_find_rq_fmerge(bfqd, bio, q);
 	if (__rq && elv_bio_merge_ok(__rq, bio)) {
 		*req = __rq;
+
+		if (blk_discard_mergable(__rq))
+			return ELEVATOR_DISCARD_MERGE;
 		return ELEVATOR_FRONT_MERGE;
 	}
 
block/blk-merge.c
@@ -705,22 +705,6 @@ static void blk_account_io_merge_request(struct request *req)
 	}
 }
 
-/*
- * Two cases of handling DISCARD merge:
- * If max_discard_segments > 1, the driver takes every bio
- * as a range and send them to controller together. The ranges
- * needn't to be contiguous.
- * Otherwise, the bios/requests will be handled as same as
- * others which should be contiguous.
- */
-static inline bool blk_discard_mergable(struct request *req)
-{
-	if (req_op(req) == REQ_OP_DISCARD &&
-	    queue_max_discard_segments(req->q) > 1)
-		return true;
-	return false;
-}
-
 static enum elv_merge blk_try_req_merge(struct request *req,
 					struct request *next)
 {
block/elevator.c
@@ -336,6 +336,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
 	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
 	if (__rq && elv_bio_merge_ok(__rq, bio)) {
 		*req = __rq;
+
+		if (blk_discard_mergable(__rq))
+			return ELEVATOR_DISCARD_MERGE;
 		return ELEVATOR_BACK_MERGE;
 	}
 
block/mq-deadline.c
@@ -677,6 +677,8 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
 
 		if (elv_bio_merge_ok(__rq, bio)) {
 			*rq = __rq;
+			if (blk_discard_mergable(__rq))
+				return ELEVATOR_DISCARD_MERGE;
 			return ELEVATOR_FRONT_MERGE;
 		}
 	}
include/linux/blkdev.h
@@ -1519,6 +1519,22 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
 	return offset << SECTOR_SHIFT;
 }
 
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver takes every bio
+ * as a range and send them to controller together. The ranges
+ * needn't to be contiguous.
+ * Otherwise, the bios/requests will be handled as same as
+ * others which should be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+	if (req_op(req) == REQ_OP_DISCARD &&
+	    queue_max_discard_segments(req->q) > 1)
+		return true;
+	return false;
+}
+
 static inline int bdev_discard_alignment(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
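
As context for why mixed merges are hard for drivers: when max_discard_segments > 1, a driver translates each bio in a discard request into one range descriptor for the controller, so every bio in the request must describe a discard range. A simplified, self-contained sketch of that translation follows; mock_bio, discard_range and build_ranges are illustrative stand-ins, not driver code (real drivers such as virtio-blk walk the request's bios to build their range tables):

/* Standalone sketch (mock types): one range per bio; the ranges need not
 * be contiguous, exactly as the comment on blk_discard_mergable() says.
 * A request mixing ordinary merged I/O with discard ranges could not be
 * translated this way, which is why the patch avoids mixed merges. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mock_bio {
	uint64_t sector;        /* start LBA of this range */
	uint32_t nr_sectors;    /* length of this range in sectors */
	struct mock_bio *next;  /* bios chained into one request */
};

struct discard_range {          /* what is sent to the controller */
	uint64_t sector;
	uint32_t nr_sectors;
};

/* Walk the bio chain and emit one range descriptor per bio. */
static size_t build_ranges(const struct mock_bio *bio,
			   struct discard_range *out, size_t max)
{
	size_t n = 0;
	for (; bio && n < max; bio = bio->next, n++) {
		out[n].sector = bio->sector;
		out[n].nr_sectors = bio->nr_sectors;
	}
	return n;
}

int main(void)
{
	struct mock_bio b2 = { 4096, 8, NULL };  /* non-contiguous 2nd range */
	struct mock_bio b1 = { 0, 16, &b2 };
	struct discard_range ranges[256];
	size_t n = build_ranges(&b1, ranges, 256);

	for (size_t i = 0; i < n; i++)
		printf("range %zu: sector=%llu len=%u\n", i,
		       (unsigned long long)ranges[i].sector,
		       ranges[i].nr_sectors);
	return 0;
}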