block: add a rq_list type
Replace the semi-open-coded request list helpers with a proper rq_list
type that mirrors the bio_list and has head and tail pointers. Besides
better type safety, this actually allows inserting at the tail of the
list, which will be useful soon.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20241113152050.157179-5-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e8225ab150
commit a3396b9999
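The core of the change is visible in the include/linux/blkdev.h hunk near the end of the diff: a two-pointer list head that threads requests through their existing rq_next field. Copied from that hunk for orientation:

struct rq_list {
	struct request *head;
	struct request *tail;
};

An empty list is simply both pointers NULL, so `struct rq_list requeue_list = {};` in the driver conversions below is equivalent to calling rq_list_init(), and the tail pointer is what turns rq_list_add_tail() from the old double-indirection lastpptr macro into a plain O(1) append.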
block/blk-core.c
@@ -1120,8 +1120,8 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
 		return;
 
 	plug->cur_ktime = 0;
-	plug->mq_list = NULL;
-	plug->cached_rq = NULL;
+	rq_list_init(&plug->mq_list);
+	rq_list_init(&plug->cached_rqs);
 	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
 	plug->rq_count = 0;
 	plug->multiple_queues = false;
@@ -1217,7 +1217,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 	 * queue for cached requests, we don't want a blocked task holding
 	 * up a queue freeze/quiesce event.
 	 */
-	if (unlikely(!rq_list_empty(plug->cached_rq)))
+	if (unlikely(!rq_list_empty(&plug->cached_rqs)))
 		blk_mq_free_plug_rqs(plug);
 
 	plug->cur_ktime = 0;
block/blk-merge.c
@@ -1179,7 +1179,7 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 	struct blk_plug *plug = current->plug;
 	struct request *rq;
 
-	if (!plug || rq_list_empty(plug->mq_list))
+	if (!plug || rq_list_empty(&plug->mq_list))
 		return false;
 
 	rq_list_for_each(&plug->mq_list, rq) {
block/blk-mq.c
@@ -478,7 +478,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 		prefetch(tags->static_rqs[tag]);
 		tag_mask &= ~(1UL << i);
 		rq = blk_mq_rq_ctx_init(data, tags, tag);
-		rq_list_add(data->cached_rq, rq);
+		rq_list_add_head(data->cached_rqs, rq);
 		nr++;
 	}
 	if (!(data->rq_flags & RQF_SCHED_TAGS))
@@ -487,7 +487,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 	percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
 	data->nr_tags -= nr;
 
-	return rq_list_pop(data->cached_rq);
+	return rq_list_pop(data->cached_rqs);
 }
 
 static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
@@ -584,7 +584,7 @@ static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
 		.flags = flags,
 		.cmd_flags = opf,
 		.nr_tags = plug->nr_ios,
-		.cached_rq = &plug->cached_rq,
+		.cached_rqs = &plug->cached_rqs,
 	};
 	struct request *rq;
 
@@ -609,14 +609,14 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
 	if (!plug)
 		return NULL;
 
-	if (rq_list_empty(plug->cached_rq)) {
+	if (rq_list_empty(&plug->cached_rqs)) {
 		if (plug->nr_ios == 1)
 			return NULL;
 		rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
 		if (!rq)
 			return NULL;
 	} else {
-		rq = rq_list_peek(&plug->cached_rq);
+		rq = rq_list_peek(&plug->cached_rqs);
 		if (!rq || rq->q != q)
 			return NULL;
 
@@ -625,7 +625,7 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
 		if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
 			return NULL;
 
-		plug->cached_rq = rq_list_next(rq);
+		rq_list_pop(&plug->cached_rqs);
 		blk_mq_rq_time_init(rq, blk_time_get_ns());
 	}
 
@@ -802,7 +802,7 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug)
 {
 	struct request *rq;
 
-	while ((rq = rq_list_pop(&plug->cached_rq)) != NULL)
+	while ((rq = rq_list_pop(&plug->cached_rqs)) != NULL)
 		blk_mq_free_request(rq);
 }
 
@@ -1392,8 +1392,7 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	 */
 	if (!plug->has_elevator && (rq->rq_flags & RQF_SCHED_TAGS))
 		plug->has_elevator = true;
-	rq->rq_next = NULL;
-	rq_list_add(&plug->mq_list, rq);
+	rq_list_add_head(&plug->mq_list, rq);
 	plug->rq_count++;
 }
 
@@ -2785,7 +2784,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug)
 	blk_status_t ret = BLK_STS_OK;
 
 	while ((rq = rq_list_pop(&plug->mq_list))) {
-		bool last = rq_list_empty(plug->mq_list);
+		bool last = rq_list_empty(&plug->mq_list);
 
 		if (hctx != rq->mq_hctx) {
 			if (hctx) {
@@ -2828,8 +2827,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 {
 	struct blk_mq_hw_ctx *this_hctx = NULL;
 	struct blk_mq_ctx *this_ctx = NULL;
-	struct request *requeue_list = NULL;
-	struct request **requeue_lastp = &requeue_list;
+	struct rq_list requeue_list = {};
 	unsigned int depth = 0;
 	bool is_passthrough = false;
 	LIST_HEAD(list);
@@ -2843,12 +2841,12 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
 			is_passthrough = blk_rq_is_passthrough(rq);
 		} else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx ||
 			   is_passthrough != blk_rq_is_passthrough(rq)) {
-			rq_list_add_tail(&requeue_lastp, rq);
+			rq_list_add_tail(&requeue_list, rq);
 			continue;
 		}
 		list_add(&rq->queuelist, &list);
 		depth++;
-	} while (!rq_list_empty(plug->mq_list));
+	} while (!rq_list_empty(&plug->mq_list));
 
 	plug->mq_list = requeue_list;
 	trace_block_unplug(this_hctx->queue, depth, !from_sched);
@@ -2903,19 +2901,19 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		if (q->mq_ops->queue_rqs) {
 			blk_mq_run_dispatch_ops(q,
 				__blk_mq_flush_plug_list(q, plug));
-			if (rq_list_empty(plug->mq_list))
+			if (rq_list_empty(&plug->mq_list))
 				return;
 		}
 
 		blk_mq_run_dispatch_ops(q,
 			blk_mq_plug_issue_direct(plug));
-		if (rq_list_empty(plug->mq_list))
+		if (rq_list_empty(&plug->mq_list))
 			return;
 	}
 
 	do {
 		blk_mq_dispatch_plug_list(plug, from_schedule);
-	} while (!rq_list_empty(plug->mq_list));
+	} while (!rq_list_empty(&plug->mq_list));
 }
 
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
@@ -2980,7 +2978,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	if (plug) {
 		data.nr_tags = plug->nr_ios;
 		plug->nr_ios = 1;
-		data.cached_rq = &plug->cached_rq;
+		data.cached_rqs = &plug->cached_rqs;
 	}
 
 	rq = __blk_mq_alloc_requests(&data);
@@ -3003,7 +3001,7 @@ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
 
 	if (!plug)
 		return NULL;
-	rq = rq_list_peek(&plug->cached_rq);
+	rq = rq_list_peek(&plug->cached_rqs);
 	if (!rq || rq->q != q)
 		return NULL;
 	if (type != rq->mq_hctx->type &&
@@ -3017,14 +3015,14 @@ static struct request *blk_mq_peek_cached_request(struct blk_plug *plug,
 static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug,
 		struct bio *bio)
 {
-	WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
+	if (rq_list_pop(&plug->cached_rqs) != rq)
+		WARN_ON_ONCE(1);
 
 	/*
 	 * If any qos ->throttle() end up blocking, we will have flushed the
 	 * plug and hence killed the cached_rq list as well. Pop this entry
 	 * before we throttle.
 	 */
-	plug->cached_rq = rq_list_next(rq);
 	rq_qos_throttle(rq->q, bio);
 
 	blk_mq_rq_time_init(rq, blk_time_get_ns());
block/blk-mq.h
@@ -155,7 +155,7 @@ struct blk_mq_alloc_data {
 
 	/* allocate multiple requests/tags in one go */
 	unsigned int nr_tags;
-	struct request **cached_rq;
+	struct rq_list *cached_rqs;
 
 	/* input & output parameter */
 	struct blk_mq_ctx *ctx;
drivers/block/null_blk/main.c
@@ -1638,10 +1638,9 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
-static void null_queue_rqs(struct request **rqlist)
+static void null_queue_rqs(struct rq_list *rqlist)
 {
-	struct request *requeue_list = NULL;
-	struct request **requeue_lastp = &requeue_list;
+	struct rq_list requeue_list = {};
 	struct blk_mq_queue_data bd = { };
 	blk_status_t ret;
 
@@ -1651,8 +1650,8 @@ static void null_queue_rqs(struct rq_list *rqlist)
 		bd.rq = rq;
 		ret = null_queue_rq(rq->mq_hctx, &bd);
 		if (ret != BLK_STS_OK)
-			rq_list_add_tail(&requeue_lastp, rq);
-	} while (!rq_list_empty(*rqlist));
+			rq_list_add_tail(&requeue_list, rq);
+	} while (!rq_list_empty(rqlist));
 
 	*rqlist = requeue_list;
 }
drivers/block/virtio_blk.c
@@ -472,7 +472,7 @@ static bool virtblk_prep_rq_batch(struct request *req)
 }
 
 static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
-		struct request **rqlist)
+		struct rq_list *rqlist)
 {
 	struct request *req;
 	unsigned long flags;
@@ -499,11 +499,10 @@ static void virtblk_add_req_batch(struct virtio_blk_vq *vq,
 		virtqueue_notify(vq->vq);
 }
 
-static void virtio_queue_rqs(struct request **rqlist)
+static void virtio_queue_rqs(struct rq_list *rqlist)
 {
-	struct request *submit_list = NULL;
-	struct request *requeue_list = NULL;
-	struct request **requeue_lastp = &requeue_list;
+	struct rq_list submit_list = { };
+	struct rq_list requeue_list = { };
 	struct virtio_blk_vq *vq = NULL;
 	struct request *req;
 
@@ -515,9 +514,9 @@ static void virtio_queue_rqs(struct rq_list *rqlist)
 			vq = this_vq;
 
 		if (virtblk_prep_rq_batch(req))
-			rq_list_add(&submit_list, req); /* reverse order */
+			rq_list_add_head(&submit_list, req); /* reverse order */
 		else
-			rq_list_add_tail(&requeue_lastp, req);
+			rq_list_add_tail(&requeue_list, req);
 	}
 
 	if (vq)
drivers/nvme/host/apple.c
@@ -649,7 +649,7 @@ static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
 
 	found = apple_nvme_poll_cq(q, &iob);
 
-	if (!rq_list_empty(iob.req_list))
+	if (!rq_list_empty(&iob.req_list))
 		apple_nvme_complete_batch(&iob);
 
 	return found;
drivers/nvme/host/pci.c
@@ -904,7 +904,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
-static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
+static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist)
 {
 	struct request *req;
 
@@ -932,11 +932,10 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
 	return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
 }
 
-static void nvme_queue_rqs(struct request **rqlist)
+static void nvme_queue_rqs(struct rq_list *rqlist)
 {
-	struct request *submit_list = NULL;
-	struct request *requeue_list = NULL;
-	struct request **requeue_lastp = &requeue_list;
+	struct rq_list submit_list = { };
+	struct rq_list requeue_list = { };
 	struct nvme_queue *nvmeq = NULL;
 	struct request *req;
 
@@ -946,9 +945,9 @@ static void nvme_queue_rqs(struct rq_list *rqlist)
 			nvmeq = req->mq_hctx->driver_data;
 
 		if (nvme_prep_rq_batch(nvmeq, req))
-			rq_list_add(&submit_list, req); /* reverse order */
+			rq_list_add_head(&submit_list, req); /* reverse order */
 		else
-			rq_list_add_tail(&requeue_lastp, req);
+			rq_list_add_tail(&requeue_list, req);
 	}
 
 	if (nvmeq)
@@ -1080,7 +1079,7 @@ static irqreturn_t nvme_irq(int irq, void *data)
 	DEFINE_IO_COMP_BATCH(iob);
 
 	if (nvme_poll_cq(nvmeq, &iob)) {
-		if (!rq_list_empty(iob.req_list))
+		if (!rq_list_empty(&iob.req_list))
 			nvme_pci_complete_batch(&iob);
 		return IRQ_HANDLED;
 	}
include/linux/blk-mq.h
@@ -229,44 +229,60 @@ static inline unsigned short req_get_ioprio(struct request *req)
 #define rq_dma_dir(rq) \
 	(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
 
-#define rq_list_add(listptr, rq) do { \
-	(rq)->rq_next = *(listptr); \
-	*(listptr) = rq; \
-} while (0)
-
-#define rq_list_add_tail(lastpptr, rq) do { \
-	(rq)->rq_next = NULL; \
-	**(lastpptr) = rq; \
-	*(lastpptr) = &rq->rq_next; \
-} while (0)
-
-#define rq_list_pop(listptr) \
-({ \
-	struct request *__req = NULL; \
-	if ((listptr) && *(listptr)) { \
-		__req = *(listptr); \
-		*(listptr) = __req->rq_next; \
-	} \
-	__req; \
-})
-
-#define rq_list_peek(listptr) \
-({ \
-	struct request *__req = NULL; \
-	if ((listptr) && *(listptr)) \
-		__req = *(listptr); \
-	__req; \
-})
-
-#define rq_list_for_each(listptr, pos) \
-	for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))
-
-#define rq_list_for_each_safe(listptr, pos, nxt) \
-	for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \
-		pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)
-
-#define rq_list_next(rq) (rq)->rq_next
-#define rq_list_empty(list) ((list) == (struct request *) NULL)
+static inline int rq_list_empty(const struct rq_list *rl)
+{
+	return rl->head == NULL;
+}
+
+static inline void rq_list_init(struct rq_list *rl)
+{
+	rl->head = NULL;
+	rl->tail = NULL;
+}
+
+static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq)
+{
+	rq->rq_next = NULL;
+	if (rl->tail)
+		rl->tail->rq_next = rq;
+	else
+		rl->head = rq;
+	rl->tail = rq;
+}
+
+static inline void rq_list_add_head(struct rq_list *rl, struct request *rq)
+{
+	rq->rq_next = rl->head;
+	rl->head = rq;
+	if (!rl->tail)
+		rl->tail = rq;
+}
+
+static inline struct request *rq_list_pop(struct rq_list *rl)
+{
+	struct request *rq = rl->head;
+
+	if (rq) {
+		rl->head = rl->head->rq_next;
+		if (!rl->head)
+			rl->tail = NULL;
+		rq->rq_next = NULL;
+	}
+
+	return rq;
+}
+
+static inline struct request *rq_list_peek(struct rq_list *rl)
+{
+	return rl->head;
+}
+
+#define rq_list_for_each(rl, pos) \
+	for (pos = rq_list_peek((rl)); (pos); pos = pos->rq_next)
+
+#define rq_list_for_each_safe(rl, pos, nxt) \
+	for (pos = rq_list_peek((rl)), nxt = pos->rq_next; \
+		pos; pos = nxt, nxt = pos ? pos->rq_next : NULL)
 
 /**
  * enum blk_eh_timer_return - How the timeout handler should proceed
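To see the new helpers in isolation, here is a small userspace sketch (not kernel code; the stand-in request struct and main() are invented for illustration) that exercises the same head/tail discipline as the inline functions above:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* stand-in for struct request: only the rq_next linkage matters here */
struct request {
	struct request *rq_next;
	int tag;
};

struct rq_list {
	struct request *head;
	struct request *tail;
};

/* same logic as rq_list_add_tail() above: O(1) append via the tail pointer */
static void add_tail(struct rq_list *rl, struct request *rq)
{
	rq->rq_next = NULL;
	if (rl->tail)
		rl->tail->rq_next = rq;
	else
		rl->head = rq;
	rl->tail = rq;
}

/* same logic as rq_list_pop() above: detach the head, clear tail on empty */
static struct request *pop(struct rq_list *rl)
{
	struct request *rq = rl->head;

	if (rq) {
		rl->head = rq->rq_next;
		if (!rl->head)
			rl->tail = NULL;
		rq->rq_next = NULL;
	}
	return rq;
}

int main(void)
{
	struct rq_list rl = {};	/* zero-init == empty list */
	struct request a = { .tag = 1 }, b = { .tag = 2 };
	struct request *rq;

	add_tail(&rl, &a);
	add_tail(&rl, &b);	/* FIFO, unlike the old head-insert rq_list_add() */

	while ((rq = pop(&rl)))
		printf("tag %d\n", rq->tag);	/* prints 1 then 2 */

	assert(!rl.head && !rl.tail);
	return 0;
}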
@@ -559,7 +575,7 @@ struct blk_mq_ops {
 	 * empty the @rqlist completely, then the rest will be queued
 	 * individually by the block layer upon return.
 	 */
-	void (*queue_rqs)(struct request **rqlist);
+	void (*queue_rqs)(struct rq_list *rqlist);
 
 	/**
 	 * @get_budget: Reserve budget before queue request, once .queue_rq is
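With the signature change above, every ->queue_rqs() implementation follows the same shape, as the null_blk and virtio_blk hunks in this diff show. A hedged sketch of that pattern (my_queue_one() is a hypothetical per-request issue helper, not a real kernel function):

static void my_queue_rqs(struct rq_list *rqlist)
{
	struct rq_list requeue_list = {};
	struct request *rq;

	while ((rq = rq_list_pop(rqlist))) {
		/* try to issue; anything that fails is kept for requeue */
		if (my_queue_one(rq) != BLK_STS_OK)
			rq_list_add_tail(&requeue_list, rq);
	}

	/*
	 * Hand back what could not be issued; the block layer then queues
	 * those requests individually, per the queue_rqs() comment above.
	 */
	*rqlist = requeue_list;
}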
@@ -868,7 +884,7 @@ static inline bool blk_mq_add_to_batch(struct request *req,
 	else if (iob->complete != complete)
 		return false;
 	iob->need_ts |= blk_mq_need_time_stamp(req);
-	rq_list_add(&iob->req_list, req);
+	rq_list_add_head(&iob->req_list, req);
 	return true;
 }
 
include/linux/blkdev.h
@@ -1007,6 +1007,11 @@ extern void blk_put_queue(struct request_queue *);
 void blk_mark_disk_dead(struct gendisk *disk);
 
 #ifdef CONFIG_BLOCK
+struct rq_list {
+	struct request *head;
+	struct request *tail;
+};
+
 /*
  * blk_plug permits building a queue of related requests by holding the I/O
  * fragments for a short period. This allows merging of sequential requests
@@ -1019,10 +1024,10 @@ void blk_mark_disk_dead(struct gendisk *disk);
  * blk_flush_plug() is called.
  */
 struct blk_plug {
-	struct request *mq_list; /* blk-mq requests */
+	struct rq_list mq_list; /* blk-mq requests */
 
 	/* if ios_left is > 1, we can batch tag/rq allocations */
-	struct request *cached_rq;
+	struct rq_list cached_rqs;
 	u64 cur_ktime;
 	unsigned short nr_ios;
 
@@ -1684,7 +1689,7 @@ int bdev_thaw(struct block_device *bdev);
 void bdev_fput(struct file *bdev_file);
 
 struct io_comp_batch {
-	struct request *req_list;
+	struct rq_list req_list;
 	bool need_ts;
 	void (*complete)(struct io_comp_batch *);
 };
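One property worth noting about the struct conversion above: since rq_list_empty() only tests the head pointer, a zero-initialized struct rq_list is already a valid empty list. That is what lets io_comp_batch users keep the plain brace initializer (e.g. DEFINE_IO_COMP_BATCH in the nvme hunk above) without calling rq_list_init(). An illustrative fragment, assuming the definitions from this diff:

struct io_comp_batch iob = { };	/* req_list.head == req_list.tail == NULL */

WARN_ON_ONCE(!rq_list_empty(&iob.req_list));	/* never fires: list is empty */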
io_uring/rw.c
@@ -1160,12 +1160,12 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 			poll_flags |= BLK_POLL_ONESHOT;
 
 		/* iopoll may have completed current req */
-		if (!rq_list_empty(iob.req_list) ||
+		if (!rq_list_empty(&iob.req_list) ||
 		    READ_ONCE(req->iopoll_completed))
 			break;
 	}
 
-	if (!rq_list_empty(iob.req_list))
+	if (!rq_list_empty(&iob.req_list))
 		iob.complete(&iob);
 	else if (!pos)
 		return 0;