blk-mq: pass a flags argument to blk_mq_request_bypass_insert
Replace the boolean at_head argument with the same flags that are already
passed to blk_mq_insert_request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-19-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 2b5976134b
parent 710fa3789e
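For context, the diff below uses the blk_insert_t flags type that an earlier
patch in this series introduced for blk_mq_insert_request(). A minimal sketch
of the definitions assumed here (block/blk-mq.h as of that earlier patch,
paraphrased; not part of this diff):

/* Bitmask type for insertion flags, so callers can OR flags together. */
typedef unsigned int __bitwise blk_insert_t;

/* Insert at the head of the dispatch list instead of the tail. */
#define BLK_MQ_INSERT_AT_HEAD	((__force blk_insert_t)0x01)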
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -428,7 +428,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		blk_mq_request_bypass_insert(rq, false);
+		blk_mq_request_bypass_insert(rq, 0);
 		blk_mq_run_hw_queue(hctx, false);
 		return;
 	}
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1447,7 +1447,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		if (rq->rq_flags & RQF_DONTPREP) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
-			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_request_bypass_insert(rq, 0);
 		} else if (rq->rq_flags & RQF_SOFTBARRIER) {
 			rq->rq_flags &= ~RQF_SOFTBARRIER;
 			list_del_init(&rq->queuelist);
@@ -2457,17 +2457,17 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 /**
  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  * @rq: Pointer to request to be inserted.
- * @at_head: true if the request should be inserted at the head of the list.
+ * @flags: BLK_MQ_INSERT_*
  *
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq, bool at_head)
+void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	spin_lock(&hctx->lock);
-	if (at_head)
+	if (flags & BLK_MQ_INSERT_AT_HEAD)
 		list_add(&rq->queuelist, &hctx->dispatch);
 	else
 		list_add_tail(&rq->queuelist, &hctx->dispatch);
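At call sites the conversion is mechanical, as the remaining hunks show:
boolean false becomes 0 (tail insertion) and true becomes
BLK_MQ_INSERT_AT_HEAD. A hypothetical before/after pair for illustration
(not taken verbatim from this patch):

/* Before: bool at_head selects head vs. tail of hctx->dispatch. */
blk_mq_request_bypass_insert(rq, true);

/* After: the same intent, expressed with the shared flags word. */
blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);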
@@ -2526,7 +2526,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
 		 * and it is added to the scheduler queue, there is no chance to
 		 * dispatch it given we prioritize requests in hctx->dispatch.
 		 */
-		blk_mq_request_bypass_insert(rq, flags & BLK_MQ_INSERT_AT_HEAD);
+		blk_mq_request_bypass_insert(rq, flags);
 	} else if (rq->rq_flags & RQF_FLUSH_SEQ) {
 		/*
 		 * Firstly normal IO request is inserted to scheduler queue or
@@ -2549,7 +2549,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
 		 * Simply queue flush rq to the front of hctx->dispatch so that
 		 * intensive flush workloads can benefit in case of NCQ HW.
 		 */
-		blk_mq_request_bypass_insert(rq, true);
+		blk_mq_request_bypass_insert(rq, BLK_MQ_INSERT_AT_HEAD);
 	} else if (q->elevator) {
 		LIST_HEAD(list);
 
@@ -2670,7 +2670,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		break;
 	case BLK_STS_RESOURCE:
 	case BLK_STS_DEV_RESOURCE:
-		blk_mq_request_bypass_insert(rq, false);
+		blk_mq_request_bypass_insert(rq, 0);
 		blk_mq_run_hw_queue(hctx, false);
 		break;
 	default:
@@ -2718,7 +2718,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
 			break;
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
-			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_request_bypass_insert(rq, 0);
 			blk_mq_run_hw_queue(hctx, false);
 			goto out;
 		default:
@@ -2837,7 +2837,7 @@ static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 			break;
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
-			blk_mq_request_bypass_insert(rq, false);
+			blk_mq_request_bypass_insert(rq, 0);
 			if (list_empty(list))
 				blk_mq_run_hw_queue(hctx, false);
 			goto out;
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -67,7 +67,7 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 /*
  * Internal helpers for request insertion into sw queues
  */
-void blk_mq_request_bypass_insert(struct request *rq, bool at_head);
+void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags);
 
 /*
  * CPU -> queue mappings