block: move the remaining elv.icq handling to the I/O scheduler
After the prepare side has been moved to the only I/O scheduler that
cares, do the same for the cleanup and the NULL initialization.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211126115817.2087431-9-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 87dd1d63dc
commit 222ee581b8
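The shape of the change, reduced to its essentials: the generic free path stops knowing about the scheduler's io_context reference and only invokes the scheduler's ->finish_request hook, while the one scheduler that takes that reference (BFQ) drops it in its own hook. The toy program below is a minimal, self-contained sketch of that ownership pattern, not kernel code; the struct layouts, the plain-integer refcount, and main() are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the kernel objects involved; names are illustrative only. */
struct io_context {
	int refcount;
};

struct request {
	unsigned int elvpriv;		/* "RQF_ELVPRIV": scheduler owns private data */
	struct io_context *icq_ioc;	/* reference taken by the scheduler, if any   */
};

struct scheduler_ops {
	void (*finish_request)(struct request *rq);
};

static void put_io_context(struct io_context *ioc)
{
	if (--ioc->refcount == 0) {
		printf("io_context freed\n");
		free(ioc);
	}
}

/*
 * Scheduler-specific cleanup: only the scheduler that took the io_context
 * reference knows it has to drop it, so the put lives in its finish hook...
 */
static void toy_bfq_finish_request(struct request *rq)
{
	if (rq->icq_ioc) {
		put_io_context(rq->icq_ioc);
		rq->icq_ioc = NULL;
	}
}

static const struct scheduler_ops toy_bfq_ops = {
	.finish_request = toy_bfq_finish_request,
};

/*
 * ...while the generic free path, like blk_mq_free_request() after this
 * patch, only calls the hook and no longer touches icq state itself.
 */
static void free_request(struct request *rq, const struct scheduler_ops *ops)
{
	if (rq->elvpriv && ops->finish_request)
		ops->finish_request(rq);
	free(rq);
}

int main(void)
{
	struct io_context *ioc = malloc(sizeof(*ioc));
	struct request *rq = malloc(sizeof(*rq));

	ioc->refcount = 1;
	rq->elvpriv = 1;
	rq->icq_ioc = ioc;		/* the "prepare" side took the reference */

	free_request(rq, &toy_bfq_ops);	/* prints "io_context freed" */
	return 0;
}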
block/bfq-iosched.c
@@ -6569,6 +6569,16 @@ static void bfq_finish_requeue_request(struct request *rq)
 	rq->elv.priv[1] = NULL;
 }
 
+static void bfq_finish_request(struct request *rq)
+{
+	bfq_finish_requeue_request(rq);
+
+	if (rq->elv.icq) {
+		put_io_context(rq->elv.icq->ioc);
+		rq->elv.icq = NULL;
+	}
+}
+
 /*
  * Removes the association between the current task and bfqq, assuming
  * that bic points to the bfq iocontext of the task.
@@ -7388,7 +7398,7 @@ static struct elevator_type iosched_bfq_mq = {
 		.limit_depth		= bfq_limit_depth,
 		.prepare_request	= bfq_prepare_request,
 		.requeue_request	= bfq_finish_requeue_request,
-		.finish_request		= bfq_finish_requeue_request,
+		.finish_request		= bfq_finish_request,
 		.exit_icq		= bfq_exit_icq,
 		.insert_requests	= bfq_insert_requests,
 		.dispatch_request	= bfq_dispatch_request,
block/blk-ioc.c
@@ -167,6 +167,7 @@ void put_io_context(struct io_context *ioc)
 	if (free_ioc)
 		kmem_cache_free(iocontext_cachep, ioc);
 }
+EXPORT_SYMBOL_GPL(put_io_context);
 
 /**
  * put_io_context_active - put active reference on ioc
block/blk-mq.c
@@ -400,7 +400,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	if (rq->rq_flags & RQF_ELV) {
 		struct elevator_queue *e = data->q->elevator;
 
-		rq->elv.icq = NULL;
 		INIT_HLIST_NODE(&rq->hash);
 		RB_CLEAR_NODE(&rq->rb_node);
 
@@ -631,16 +630,9 @@ void blk_mq_free_request(struct request *rq)
 	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
-	if (rq->rq_flags & RQF_ELVPRIV) {
-		struct elevator_queue *e = q->elevator;
-
-		if (e->type->ops.finish_request)
-			e->type->ops.finish_request(rq);
-		if (rq->elv.icq) {
-			put_io_context(rq->elv.icq->ioc);
-			rq->elv.icq = NULL;
-		}
-	}
+	if ((rq->rq_flags & RQF_ELVPRIV) &&
+	    q->elevator->type->ops.finish_request)
+		q->elevator->type->ops.finish_request(rq);
 
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		__blk_mq_dec_active_requests(hctx);