Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
blk-mq: don't run the hw_queue from blk_mq_insert_request
blk_mq_insert_request takes two bool parameters to control how to run
the queue at the end of the function.  Move the blk_mq_run_hw_queue call
to the callers that want it instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-15-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit f0dbe6e88e
parent e1f44ac0d7
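In short: blk_mq_insert_request() loses its run_queue/async bool arguments and now only inserts the request; each caller that wants the queue run pairs the insert with an explicit blk_mq_run_hw_queue() call. A minimal sketch of the before/after calling pattern, condensed from the hunks below rather than quoted verbatim:

	/* Before: queue running was folded into the insert via two bools. */
	blk_mq_insert_request(rq, at_head, /* run_queue */ true, /* async */ false);

	/* After: the insert only queues the request; running it is the caller's job. */
	blk_mq_insert_request(rq, at_head);
	blk_mq_run_hw_queue(rq->mq_hctx, /* async */ false);

Callers that never wanted the queue run (the requeue paths and the stopped/quiesced cases) simply drop the extra arguments.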
block/blk-mq.c

@@ -44,8 +44,7 @@
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
-static void blk_mq_insert_request(struct request *rq, bool at_head,
-		bool run_queue, bool async);
+static void blk_mq_insert_request(struct request *rq, bool at_head);
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list);
 
@@ -1292,6 +1291,8 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
  */
 void blk_execute_rq_nowait(struct request *rq, bool at_head)
 {
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
 	WARN_ON(irqs_disabled());
 	WARN_ON(!blk_rq_is_passthrough(rq));
 
@@ -1302,10 +1303,13 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
 	 * device, directly accessing the plug instead of using blk_mq_plug()
 	 * should not have any consequences.
 	 */
-	if (current->plug && !at_head)
+	if (current->plug && !at_head) {
 		blk_add_rq_to_plug(current->plug, rq);
-	else
-		blk_mq_insert_request(rq, at_head, true, false);
+		return;
+	}
+
+	blk_mq_insert_request(rq, at_head);
+	blk_mq_run_hw_queue(hctx, false);
 }
 EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 
@@ -1355,6 +1359,7 @@ static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
  */
 blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 {
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 	struct blk_rq_wait wait = {
 		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
 	};
@@ -1366,7 +1371,8 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
 	rq->end_io = blk_end_sync_rq;
 
 	blk_account_io_start(rq);
-	blk_mq_insert_request(rq, at_head, true, false);
+	blk_mq_insert_request(rq, at_head);
+	blk_mq_run_hw_queue(hctx, false);
 
 	if (blk_rq_is_poll(rq)) {
 		blk_rq_poll_completion(rq, &wait.done);
@@ -1440,14 +1446,14 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		} else if (rq->rq_flags & RQF_SOFTBARRIER) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
-			blk_mq_insert_request(rq, true, false, false);
+			blk_mq_insert_request(rq, true);
 		}
 	}
 
 	while (!list_empty(&rq_list)) {
 		rq = list_entry(rq_list.next, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		blk_mq_insert_request(rq, false, false, false);
+		blk_mq_insert_request(rq, false);
 	}
 
 	blk_mq_run_hw_queues(q, false);
@@ -2507,8 +2513,7 @@ static void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx,
 		blk_mq_run_hw_queue(hctx, run_queue_async);
 }
 
-static void blk_mq_insert_request(struct request *rq, bool at_head,
-		bool run_queue, bool async)
+static void blk_mq_insert_request(struct request *rq, bool at_head)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
@@ -2568,9 +2573,6 @@ static void blk_mq_insert_request(struct request *rq, bool at_head,
 		blk_mq_hctx_mark_pending(hctx, ctx);
 		spin_unlock(&ctx->lock);
 	}
-
-	if (run_queue)
-		blk_mq_run_hw_queue(hctx, async);
 }
 
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
@@ -2655,12 +2657,13 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	blk_status_t ret;
 
 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
-		blk_mq_insert_request(rq, false, false, false);
+		blk_mq_insert_request(rq, false);
 		return;
 	}
 
 	if ((rq->rq_flags & RQF_ELV) || !blk_mq_get_budget_and_tag(rq)) {
-		blk_mq_insert_request(rq, false, true, false);
+		blk_mq_insert_request(rq, false);
+		blk_mq_run_hw_queue(hctx, false);
 		return;
 	}
 
@@ -2683,7 +2686,7 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
-		blk_mq_insert_request(rq, false, false, false);
+		blk_mq_insert_request(rq, false);
 		return BLK_STS_OK;
 	}
 
@@ -2963,6 +2966,7 @@ void blk_mq_submit_bio(struct bio *bio)
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	struct blk_plug *plug = blk_mq_plug(bio);
 	const int is_sync = op_is_sync(bio->bi_opf);
+	struct blk_mq_hw_ctx *hctx;
 	struct request *rq;
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
@@ -3007,15 +3011,19 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (plug)
+	if (plug) {
 		blk_add_rq_to_plug(plug, rq);
-	else if ((rq->rq_flags & RQF_ELV) ||
-		 (rq->mq_hctx->dispatch_busy &&
-		  (q->nr_hw_queues == 1 || !is_sync)))
-		blk_mq_insert_request(rq, false, true, true);
-	else
-		blk_mq_run_dispatch_ops(rq->q,
-				blk_mq_try_issue_directly(rq->mq_hctx, rq));
+		return;
+	}
+
+	hctx = rq->mq_hctx;
+	if ((rq->rq_flags & RQF_ELV) ||
+	    (hctx->dispatch_busy && (q->nr_hw_queues == 1 || !is_sync))) {
+		blk_mq_insert_request(rq, false);
+		blk_mq_run_hw_queue(hctx, true);
+	} else {
+		blk_mq_run_dispatch_ops(q, blk_mq_try_issue_directly(hctx, rq));
+	}
 }
 
 #ifdef CONFIG_BLK_MQ_STACKING