Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
blk-mq: Add blk_mq_alloc_map_and_rqs()
Add a function to combine allocating tags and the associated requests,
and factor out common patterns to use this new function.

Some functions only call blk_mq_alloc_map_and_rqs() now, but more
functionality will be added later.

Also make blk_mq_alloc_rq_map() and blk_mq_alloc_rqs() static since they
are only used in blk-mq.c, and finally rename some functions for
conciseness and consistency with other function names:
- __blk_mq_alloc_map_and_{request -> rqs}()
- blk_mq_alloc_{map_and_requests -> set_map_and_rqs}()

Suggested-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/1633429419-228500-11-git-send-email-john.garry@huawei.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 63064be150
parent a7e7388dce
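For orientation, here is a condensed caller-side sketch of the pattern this patch factors out, distilled from the hunks below. The tags, set, hctx_idx, and depth variables stand in for whatever the real caller uses; this is an illustration, not literal kernel code.

        /* Before: allocate the tag map, then the requests, unwinding by hand. */
        tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags,
                                   set->flags);
        if (!tags)
                return -ENOMEM;
        if (blk_mq_alloc_rqs(set, tags, hctx_idx, depth)) {
                blk_mq_free_rq_map(tags, set->flags);
                return -ENOMEM;
        }

        /* After: a single helper that returns a fully populated map, or NULL. */
        tags = blk_mq_alloc_map_and_rqs(set, hctx_idx, depth);
        if (!tags)
                return -ENOMEM;

The two-step allocation and its error unwinding move into blk_mq_alloc_map_and_rqs() itself, which is why the callers in blk-mq-sched.c and blk-mq-tag.c shrink in the diff below.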
block/blk-mq-sched.c

@@ -519,21 +519,12 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
 					  struct blk_mq_hw_ctx *hctx,
 					  unsigned int hctx_idx)
 {
-	struct blk_mq_tag_set *set = q->tag_set;
-	int ret;
-
-	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
-					       set->reserved_tags, set->flags);
+	hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
+						    q->nr_requests);
 	if (!hctx->sched_tags)
 		return -ENOMEM;
 
-	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
-	if (ret) {
-		blk_mq_free_rq_map(hctx->sched_tags, set->flags);
-		hctx->sched_tags = NULL;
-	}
-
-	return ret;
+	return 0;
 }
 
 /* called in queue's release handler, tagset has gone away */
block/blk-mq-tag.c

@@ -592,7 +592,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 	if (tdepth > tags->nr_tags) {
 		struct blk_mq_tag_set *set = hctx->queue->tag_set;
 		struct blk_mq_tags *new;
-		bool ret;
 
 		if (!can_grow)
 			return -EINVAL;
@@ -604,15 +603,9 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 		if (tdepth > MAX_SCHED_RQ)
 			return -EINVAL;
 
-		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
-					  tags->nr_reserved_tags, set->flags);
+		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
 		if (!new)
 			return -ENOMEM;
-		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
-		if (ret) {
-			blk_mq_free_rq_map(new, set->flags);
-			return -ENOMEM;
-		}
 
 		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
 		blk_mq_free_rq_map(*tagsptr, set->flags);
block/blk-mq.c

@@ -2392,11 +2392,11 @@ void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags)
 	blk_mq_free_tags(tags, flags);
 }
 
-struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
-					unsigned int hctx_idx,
-					unsigned int nr_tags,
-					unsigned int reserved_tags,
-					unsigned int flags)
+static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
+					       unsigned int hctx_idx,
+					       unsigned int nr_tags,
+					       unsigned int reserved_tags,
+					       unsigned int flags)
 {
 	struct blk_mq_tags *tags;
 	int node;
@@ -2444,8 +2444,9 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 	return 0;
 }
 
-int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
-		     unsigned int hctx_idx, unsigned int depth)
+static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
+			    struct blk_mq_tags *tags,
+			    unsigned int hctx_idx, unsigned int depth)
 {
 	unsigned int i, j, entries_per_page, max_order = 4;
 	size_t rq_size, left;
@@ -2856,25 +2857,34 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 	}
 }
 
-static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
-					   int hctx_idx)
+struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+					     unsigned int hctx_idx,
+					     unsigned int depth)
 {
-	unsigned int flags = set->flags;
-	int ret = 0;
+	struct blk_mq_tags *tags;
+	int ret;
 
-	set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
-					set->queue_depth, set->reserved_tags, flags);
-	if (!set->tags[hctx_idx])
-		return false;
+	tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags,
+				   set->flags);
+	if (!tags)
+		return NULL;
 
-	ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
-				set->queue_depth);
-	if (!ret)
-		return true;
+	ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
+	if (ret) {
+		blk_mq_free_rq_map(tags, set->flags);
+		return NULL;
+	}
 
-	blk_mq_free_rq_map(set->tags[hctx_idx], flags);
-	set->tags[hctx_idx] = NULL;
-	return false;
+	return tags;
+}
+
+static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+				       int hctx_idx)
+{
+	set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
+						       set->queue_depth);
+
+	return set->tags[hctx_idx];
 }
 
 static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
@@ -2919,7 +2929,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 			hctx_idx = set->map[j].mq_map[i];
 			/* unmapped hw queue can be remapped after CPU topo changed */
 			if (!set->tags[hctx_idx] &&
-			    !__blk_mq_alloc_map_and_request(set, hctx_idx)) {
+			    !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
 				/*
 				 * If tags initialization fail for some hctx,
 				 * that hctx won't be brought online. In this
@@ -3352,7 +3362,7 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 	int i;
 
 	for (i = 0; i < set->nr_hw_queues; i++) {
-		if (!__blk_mq_alloc_map_and_request(set, i))
+		if (!__blk_mq_alloc_map_and_rqs(set, i))
 			goto out_unwind;
 		cond_resched();
 	}
@@ -3371,7 +3381,7 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
  * may reduce the depth asked for, if memory is tight. set->queue_depth
  * will be updated to reflect the allocated depth.
  */
-static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set)
+static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
 {
 	unsigned int depth;
 	int err;
@@ -3537,7 +3547,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 	if (ret)
 		goto out_free_mq_map;
 
-	ret = blk_mq_alloc_map_and_requests(set);
+	ret = blk_mq_alloc_set_map_and_rqs(set);
 	if (ret)
 		goto out_free_mq_map;
 
block/blk-mq.h

@@ -55,13 +55,8 @@ void blk_mq_put_rq_ref(struct request *rq);
 void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 		     unsigned int hctx_idx);
 void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
-struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
-					unsigned int hctx_idx,
-					unsigned int nr_tags,
-					unsigned int reserved_tags,
-					unsigned int flags);
-int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
-		     unsigned int hctx_idx, unsigned int depth);
+struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+					     unsigned int hctx_idx, unsigned int depth);
 
 /*
  * Internal helpers for request insertion into sw queues