blk-mq: pass a flags argument to elevator_type->insert_requests
Instead of passing a bool at_head, pass down the full flags from the
blk_mq_insert_request interface.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-20-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent 2b5976134b
commit 93fffe16f7
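The pattern the commit adopts is easiest to see in isolation. Below is a
minimal, self-contained userspace C sketch of the new calling convention:
the scheduler's insert hook receives the full blk_insert_t flags word and
tests BLK_MQ_INSERT_AT_HEAD itself, instead of being handed a pre-digested
bool. The typedef, the flag value, and the list handling here are
simplified stand-ins for illustration, not the kernel's definitions.

/*
 * Sketch only: blk_insert_t, BLK_MQ_INSERT_AT_HEAD, and the dispatch
 * list below are simplified stand-ins for the kernel's types.
 */
#include <stdio.h>

typedef unsigned int blk_insert_t;          /* kernel uses a __bitwise type */
#define BLK_MQ_INSERT_AT_HEAD	(1u << 0)   /* illustrative flag value */

struct request {
	int id;
	struct request *next;
};

/* toy dispatch list: singly linked, head pointer only */
static struct request *dispatch;

static void insert_request(struct request *rq, blk_insert_t flags)
{
	if (flags & BLK_MQ_INSERT_AT_HEAD) {
		/* head insertion, decided by the scheduler from the flag */
		rq->next = dispatch;
		dispatch = rq;
	} else {
		/* default: append at the tail */
		struct request **tail = &dispatch;

		while (*tail)
			tail = &(*tail)->next;
		rq->next = NULL;
		*tail = rq;
	}
}

int main(void)
{
	struct request a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	insert_request(&a, 0);                      /* tail */
	insert_request(&b, 0);                      /* tail */
	insert_request(&c, BLK_MQ_INSERT_AT_HEAD);  /* head */

	for (struct request *rq = dispatch; rq; rq = rq->next)
		printf("rq %d\n", rq->id);          /* prints 3, 1, 2 */
	return 0;
}

Passing the whole flags word rather than one decoded bool also means
future insert flags can reach the schedulers without another signature
change, which appears to be the point of widening the interface here.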
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -6231,7 +6231,7 @@ static inline void bfq_update_insert_stats(struct request_queue *q,
 static struct bfq_queue *bfq_init_rq(struct request *rq);
 
 static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
-			       bool at_head)
+			       blk_insert_t flags)
 {
 	struct request_queue *q = hctx->queue;
 	struct bfq_data *bfqd = q->elevator->elevator_data;
@@ -6254,11 +6254,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
 	trace_block_rq_insert(rq);
 
-	if (!bfqq || at_head) {
-		if (at_head)
-			list_add(&rq->queuelist, &bfqd->dispatch);
-		else
-			list_add_tail(&rq->queuelist, &bfqd->dispatch);
+	if (flags & BLK_MQ_INSERT_AT_HEAD) {
+		list_add(&rq->queuelist, &bfqd->dispatch);
+	} else if (!bfqq) {
+		list_add_tail(&rq->queuelist, &bfqd->dispatch);
 	} else {
 		idle_timer_disabled = __bfq_insert_request(bfqd, rq);
 		/*
@@ -6288,14 +6287,15 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 }
 
 static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
-				struct list_head *list, bool at_head)
+				struct list_head *list,
+				blk_insert_t flags)
 {
 	while (!list_empty(list)) {
 		struct request *rq;
 
 		rq = list_first_entry(list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		bfq_insert_request(hctx, rq, at_head);
+		bfq_insert_request(hctx, rq, flags);
 	}
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2556,8 +2556,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
 		WARN_ON_ONCE(rq->tag != BLK_MQ_NO_TAG);
 
 		list_add(&rq->queuelist, &list);
-		q->elevator->type->ops.insert_requests(hctx, &list,
-				flags & BLK_MQ_INSERT_AT_HEAD);
+		q->elevator->type->ops.insert_requests(hctx, &list, flags);
 	} else {
 		trace_block_rq_insert(rq);
 
@@ -2768,7 +2767,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 	percpu_ref_get(&this_hctx->queue->q_usage_counter);
 	if (this_hctx->queue->elevator) {
 		this_hctx->queue->elevator->type->ops.insert_requests(this_hctx,
-				&list, false);
+				&list, 0);
 		blk_mq_run_hw_queue(this_hctx, from_sched);
 	} else {
 		blk_mq_insert_requests(this_hctx, this_ctx, &list, from_sched);
diff --git a/block/elevator.h b/block/elevator.h
--- a/block/elevator.h
+++ b/block/elevator.h
@@ -4,6 +4,7 @@
 
 #include <linux/percpu.h>
 #include <linux/hashtable.h>
+#include "blk-mq.h"
 
 struct io_cq;
 struct elevator_type;
@@ -37,7 +38,8 @@ struct elevator_mq_ops {
 	void (*limit_depth)(blk_opf_t, struct blk_mq_alloc_data *);
 	void (*prepare_request)(struct request *);
 	void (*finish_request)(struct request *);
-	void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
+	void (*insert_requests)(struct blk_mq_hw_ctx *hctx, struct list_head *list,
+			blk_insert_t flags);
 	struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
 	bool (*has_work)(struct blk_mq_hw_ctx *);
 	void (*completed_request)(struct request *, u64);
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -588,7 +588,8 @@ static void kyber_prepare_request(struct request *rq)
 }
 
 static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
-				  struct list_head *rq_list, bool at_head)
+				  struct list_head *rq_list,
+				  blk_insert_t flags)
 {
 	struct kyber_hctx_data *khd = hctx->sched_data;
 	struct request *rq, *next;
@@ -600,7 +601,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
 
 		spin_lock(&kcq->lock);
 		trace_block_rq_insert(rq);
-		if (at_head)
+		if (flags & BLK_MQ_INSERT_AT_HEAD)
 			list_move(&rq->queuelist, head);
 		else
 			list_move_tail(&rq->queuelist, head);
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -766,7 +766,7 @@ static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
  * add rq to rbtree and fifo
  */
 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
-			      bool at_head)
+			      blk_insert_t flags)
 {
 	struct request_queue *q = hctx->queue;
 	struct deadline_data *dd = q->elevator->elevator_data;
@@ -799,7 +799,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
 	trace_block_rq_insert(rq);
 
-	if (at_head) {
+	if (flags & BLK_MQ_INSERT_AT_HEAD) {
 		list_add(&rq->queuelist, &per_prio->dispatch);
 		rq->fifo_time = jiffies;
 	} else {
@@ -823,7 +823,8 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  * Called from blk_mq_insert_request() or blk_mq_dispatch_plug_list().
  */
 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
-			       struct list_head *list, bool at_head)
+			       struct list_head *list,
+			       blk_insert_t flags)
 {
 	struct request_queue *q = hctx->queue;
 	struct deadline_data *dd = q->elevator->elevator_data;
@@ -834,7 +835,7 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
 
 		rq = list_first_entry(list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		dd_insert_request(hctx, rq, at_head);
+		dd_insert_request(hctx, rq, flags);
 	}
 	spin_unlock(&dd->lock);
 }