Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2024-12-29 09:12:07 +00:00)
blk-mq: fold blk_mq_sched_insert_requests into blk_mq_dispatch_plug_list
blk_mq_dispatch_plug_list is the only caller of blk_mq_sched_insert_requests,
and it makes sense to just fold it there, as blk_mq_sched_insert_requests
isn't specific to I/O schedulers despite the name.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-6-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 94aa228c2a
commit 05a9311770
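After the fold, the tail of blk_mq_dispatch_plug_list() absorbs the old
function body. A minimal sketch of the resulting logic, condensed from the
hunks below; the explanatory comments are ours, paraphrasing the comment
deleted from blk_mq_sched_insert_requests():

	/*
	 * Plug flush holds a queue usage reference so the queue cannot be
	 * released while the requests are inserted (the rationale given in
	 * the comment removed from blk_mq_sched_insert_requests below).
	 */
	percpu_ref_get(&this_hctx->queue->q_usage_counter);
	if (this_hctx->queue->elevator) {
		/* An I/O scheduler is attached: hand the list to its
		 * insert_requests hook, then kick the hardware queue. */
		this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
				&list, false);
		blk_mq_run_hw_queue(this_hctx, from_sched);
	} else {
		/* No scheduler: insert straight into the software queues. */
		blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
	}
	percpu_ref_put(&this_hctx->queue->q_usage_counter);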
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -455,30 +455,6 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 	blk_mq_run_hw_queue(hctx, async);
 }
 
-void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
-				  struct blk_mq_ctx *ctx,
-				  struct list_head *list, bool run_queue_async)
-{
-	struct elevator_queue *e;
-	struct request_queue *q = hctx->queue;
-
-	/*
-	 * blk_mq_sched_insert_requests() is called from flush plug
-	 * context only, and hold one usage counter to prevent queue
-	 * from being released.
-	 */
-	percpu_ref_get(&q->q_usage_counter);
-
-	e = hctx->queue->elevator;
-	if (e) {
-		e->type->ops.insert_requests(hctx, list, false);
-		blk_mq_run_hw_queue(hctx, run_queue_async);
-	} else {
-		blk_mq_insert_requests(hctx, ctx, list, run_queue_async);
-	}
-	percpu_ref_put(&q->q_usage_counter);
-}
-
 static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
 					  struct blk_mq_hw_ctx *hctx,
 					  unsigned int hctx_idx)
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -18,9 +18,6 @@ void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
 
 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 				 bool run_queue, bool async);
-void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
-				  struct blk_mq_ctx *ctx,
-				  struct list_head *list, bool run_queue_async);
 
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2497,9 +2497,9 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 		blk_mq_run_hw_queue(hctx, false);
 }
 
-void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			    struct list_head *list, bool run_queue_async)
-
+static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
+		struct blk_mq_ctx *ctx, struct list_head *list,
+		bool run_queue_async)
 {
 	struct request *rq;
 	enum hctx_type type = hctx->type;
@@ -2725,7 +2725,16 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 
 	plug->mq_list = requeue_list;
 	trace_block_unplug(this_hctx->queue, depth, !from_sched);
-	blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
+
+	percpu_ref_get(&this_hctx->queue->q_usage_counter);
+	if (this_hctx->queue->elevator) {
+		this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
+				&list, false);
+		blk_mq_run_hw_queue(this_hctx, from_sched);
+	} else {
+		blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
+	}
+	percpu_ref_put(&this_hctx->queue->q_usage_counter);
 }
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -69,8 +69,6 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 				bool at_head);
 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 				  bool run_queue);
-void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			    struct list_head *list, bool run_queue_async);
 
 /*
  * CPU -> queue mappings
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -820,7 +820,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 }
 
 /*
- * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
+ * Called from blk_mq_sched_insert_request() or blk_mq_dispatch_plug_list().
  */
 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
 			       struct list_head *list, bool at_head)