Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Synced 2025-01-01 10:45:49 +00:00
blk-mq: allocate flush_rq in blk_mq_init_flush()
It is reasonable to allocate the flush request in blk_mq_init_flush().

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 08e98fc601
commit 1bcb1eada4
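Before this patch, blk_mq_init_flush() only initialized the flush lock, while blk_mq_init_queue() allocated q->flush_rq inline. The patch moves the allocation into blk_mq_init_flush(), which now returns an error code, and calls it from blk_mq_init_queue() after the hardware queues are set up, adjusting the error-unwind labels to match.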
block/blk-flush.c
@@ -472,7 +472,16 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
 
-void blk_mq_init_flush(struct request_queue *q)
+int blk_mq_init_flush(struct request_queue *q)
 {
+	struct blk_mq_tag_set *set = q->tag_set;
+
 	spin_lock_init(&q->mq_flush_lock);
+
+	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
+				set->cmd_size, cache_line_size()),
+				GFP_KERNEL);
+	if (!q->flush_rq)
+		return -ENOMEM;
+	return 0;
 }
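As a side note on the sizing expression: the flush request and the driver's per-request payload (set->cmd_size) share a single allocation, padded to a cache-line boundary. Below is a minimal userspace sketch of that computation, with made-up sizes standing in for sizeof(struct request), set->cmd_size, and cache_line_size() — illustration only, not kernel code.

/* Standalone sketch of the flush_rq sizing logic:
 *   kzalloc(round_up(sizeof(struct request) + set->cmd_size,
 *                    cache_line_size()), GFP_KERNEL)
 * All sizes below are hypothetical placeholders.
 */
#include <stdio.h>
#include <stddef.h>

/* Generic round-up-to-multiple; the kernel's round_up() requires a
 * power-of-two alignment and uses bit masking instead. */
static size_t round_up_to(size_t x, size_t align)
{
	return ((x + align - 1) / align) * align;
}

int main(void)
{
	size_t request_size = 384;	/* stand-in for sizeof(struct request) */
	size_t cmd_size     = 24;	/* stand-in for set->cmd_size */
	size_t cache_line   = 64;	/* stand-in for cache_line_size() */

	/* One allocation holds the request plus the driver payload,
	 * padded so the block never shares a cache line. */
	size_t alloc = round_up_to(request_size + cmd_size, cache_line);
	printf("flush_rq allocation: %zu bytes\n", alloc);	/* prints 448 */
	return 0;
}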
block/blk-mq.c
@@ -1848,17 +1848,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	if (set->ops->complete)
 		blk_queue_softirq_done(q, set->ops->complete);
 
-	blk_mq_init_flush(q);
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-	q->flush_rq = kzalloc(round_up(sizeof(struct request) +
-				set->cmd_size, cache_line_size()),
-				GFP_KERNEL);
-	if (!q->flush_rq)
-		goto err_hw;
-
 	if (blk_mq_init_hw_queues(q, set))
-		goto err_flush_rq;
+		goto err_hw;
 
 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);
@@ -1866,12 +1859,15 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	blk_mq_add_queue_tag_set(set, q);
 
+	if (blk_mq_init_flush(q))
+		goto err_hw_queues;
+
 	blk_mq_map_swqueue(q);
 
 	return q;
 
-err_flush_rq:
-	kfree(q->flush_rq);
+err_hw_queues:
+	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 err_hw:
 	blk_cleanup_queue(q);
 err_hctxs:
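Because blk_mq_init_flush() now runs after blk_mq_init_hw_queues(), its failure path has to tear the hardware queues down again, which is why err_flush_rq becomes err_hw_queues. Below is a compilable toy model of that goto-unwind ordering, with invented helper names rather than the real kernel symbols.

/* Toy model (not kernel code) of goto-based unwinding: each err_*
 * label undoes only what was initialized before the failing step,
 * in reverse order. Helper names are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

static int init_hw_queues(void)  { puts("hw queues up");      return 0; }
static void exit_hw_queues(void) { puts("hw queues down");    }
static int init_flush(void)      { puts("flush init failed"); return -1; }
static void cleanup_queue(void)  { puts("queue cleaned up");  }

static int init_queue(void)
{
	if (init_hw_queues())
		goto err;		/* flush_rq does not exist yet */

	if (init_flush())		/* runs after the hw queues ... */
		goto err_hw_queues;	/* ... so they unwind first */

	return 0;

err_hw_queues:
	exit_hw_queues();
err:
	cleanup_queue();
	return -1;
}

int main(void)
{
	return init_queue() ? EXIT_FAILURE : EXIT_SUCCESS;
}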
block/blk-mq.h
@@ -27,7 +27,7 @@ struct blk_mq_ctx {
 
 void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
-void blk_mq_init_flush(struct request_queue *q);
+int blk_mq_init_flush(struct request_queue *q);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_free_queue(struct request_queue *q);
 void blk_mq_clone_flush_request(struct request *flush_rq,