blk-mq: support multiple hctx maps
Add support for the tag set carrying multiple queue maps, and for the
driver to inform blk-mq how many it wishes to support through setting
set->nr_maps.

This adds an mq_ops helper for drivers that support more than 1 map,
mq_ops->rq_flags_to_type(). The function takes request/bio flags and
CPU, and returns a queue map index for that. We then use the type
information in blk_mq_map_queue() to index the map set.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit b3c661b15d
parent a783b81820
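For reference, a minimal driver-side sketch of the interface this patch adds: a tag set that carries two queue maps and routes requests between them via ->rq_flags_to_type(). Everything named foo_* here (the device struct, the read/write split policy, the queue depth and flags) is a hypothetical illustration, not code from this patch.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

struct foo_dev {
        struct blk_mq_tag_set   tag_set;
        unsigned int            nr_hw_queues;
};

/* illustrative map indices; any value below set->nr_maps is valid */
enum {
        FOO_MAP_READ    = 0,
        FOO_MAP_WRITE   = 1,
        FOO_MAP_NR      = 2,
};

/* takes rq->cmd_flags, returns an index into set->map[] */
static int foo_rq_flags_to_type(struct request_queue *q, unsigned int flags)
{
        return op_is_write(flags) ? FOO_MAP_WRITE : FOO_MAP_READ;
}

/* trivial ->queue_rq() stub: complete every request immediately */
static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                 const struct blk_mq_queue_data *bd)
{
        blk_mq_start_request(bd->rq);
        blk_mq_end_request(bd->rq, BLK_STS_OK);
        return BLK_STS_OK;
}

/* spread CPUs over the hardware queues of each map */
static int foo_map_queues(struct blk_mq_tag_set *set)
{
        int i, ret;

        for (i = 0; i < set->nr_maps; i++) {
                ret = blk_mq_map_queues(&set->map[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static const struct blk_mq_ops foo_mq_ops = {
        .queue_rq               = foo_queue_rq,
        .rq_flags_to_type       = foo_rq_flags_to_type,
        .map_queues             = foo_map_queues,
};

static int foo_init_tag_set(struct foo_dev *dev)
{
        struct blk_mq_tag_set *set = &dev->tag_set;

        memset(set, 0, sizeof(*set));
        set->ops                = &foo_mq_ops;
        set->nr_hw_queues       = dev->nr_hw_queues;
        set->nr_maps            = FOO_MAP_NR;   /* must be <= HCTX_MAX_TYPES */
        set->queue_depth        = 128;
        set->numa_node          = NUMA_NO_NODE;
        set->flags              = BLK_MQ_F_SHOULD_MERGE;

        return blk_mq_alloc_tag_set(set);
}

Note that with nr_maps > 1 a ->map_queues() callback is required (the fallback path in blk_mq_update_queue_map() hits BUG_ON(set->nr_maps > 1)), which is why the sketch supplies one even though it only reuses blk_mq_map_queues() per map.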
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2258,7 +2258,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
 static void blk_mq_init_cpu_queues(struct request_queue *q,
                                    unsigned int nr_hw_queues)
 {
-        unsigned int i;
+        struct blk_mq_tag_set *set = q->tag_set;
+        unsigned int i, j;
 
         for_each_possible_cpu(i) {
                 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2273,9 +2274,11 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
                  * Set local node, IFF we have more than one hw queue. If
                  * not, we remain on the home node of the device
                  */
-                hctx = blk_mq_map_queue_type(q, 0, i);
-                if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
-                        hctx->numa_node = local_memory_node(cpu_to_node(i));
+                for (j = 0; j < set->nr_maps; j++) {
+                        hctx = blk_mq_map_queue_type(q, j, i);
+                        if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
+                                hctx->numa_node = local_memory_node(cpu_to_node(i));
+                }
         }
 }
 
@@ -2310,7 +2313,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
 
 static void blk_mq_map_swqueue(struct request_queue *q)
 {
-        unsigned int i, hctx_idx;
+        unsigned int i, j, hctx_idx;
         struct blk_mq_hw_ctx *hctx;
         struct blk_mq_ctx *ctx;
         struct blk_mq_tag_set *set = q->tag_set;
@@ -2346,17 +2349,28 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                 }
 
                 ctx = per_cpu_ptr(q->queue_ctx, i);
-                hctx = blk_mq_map_queue_type(q, 0, i);
-                hctx->type = 0;
-                cpumask_set_cpu(i, hctx->cpumask);
-                ctx->index_hw[hctx->type] = hctx->nr_ctx;
-                hctx->ctxs[hctx->nr_ctx++] = ctx;
+                for (j = 0; j < set->nr_maps; j++) {
+                        hctx = blk_mq_map_queue_type(q, j, i);
 
-                /*
-                 * If the nr_ctx type overflows, we have exceeded the
-                 * amount of sw queues we can support.
-                 */
-                BUG_ON(!hctx->nr_ctx);
+                        /*
+                         * If the CPU is already set in the mask, then we've
+                         * mapped this one already. This can happen if
+                         * devices share queues across queue maps.
+                         */
+                        if (cpumask_test_cpu(i, hctx->cpumask))
+                                continue;
+
+                        cpumask_set_cpu(i, hctx->cpumask);
+                        hctx->type = j;
+                        ctx->index_hw[hctx->type] = hctx->nr_ctx;
+                        hctx->ctxs[hctx->nr_ctx++] = ctx;
+
+                        /*
+                         * If the nr_ctx type overflows, we have exceeded the
+                         * amount of sw queues we can support.
+                         */
+                        BUG_ON(!hctx->nr_ctx);
+                }
         }
 
         mutex_unlock(&q->sysfs_lock);
@@ -2524,6 +2538,7 @@ struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
         memset(set, 0, sizeof(*set));
         set->ops = ops;
         set->nr_hw_queues = 1;
+        set->nr_maps = 1;
         set->queue_depth = queue_depth;
         set->numa_node = NUMA_NO_NODE;
         set->flags = set_flags;
@@ -2800,6 +2815,8 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
 static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
 {
         if (set->ops->map_queues) {
+                int i;
+
                 /*
                  * transport .map_queues is usually done in the following
                  * way:
@@ -2807,18 +2824,21 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
                  *      mask = get_cpu_mask(queue)
                  *      for_each_cpu(cpu, mask)
-                 *              set->map.mq_map[cpu] = queue;
+                 *              set->map[x].mq_map[cpu] = queue;
                  * }
                  *
                  * When we need to remap, the table has to be cleared for
                  * killing stale mapping since one CPU may not be mapped
                  * to any hw queue.
                  */
-                blk_mq_clear_mq_map(&set->map[0]);
+                for (i = 0; i < set->nr_maps; i++)
+                        blk_mq_clear_mq_map(&set->map[i]);
 
                 return set->ops->map_queues(set);
-        } else
+        } else {
+                BUG_ON(set->nr_maps > 1);
                 return blk_mq_map_queues(&set->map[0]);
+        }
 }
 
 /*
@@ -2829,7 +2849,7 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
  */
 int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 {
-        int ret;
+        int i, ret;
 
         BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
 
@@ -2852,6 +2872,11 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                 set->queue_depth = BLK_MQ_MAX_DEPTH;
         }
 
+        if (!set->nr_maps)
+                set->nr_maps = 1;
+        else if (set->nr_maps > HCTX_MAX_TYPES)
+                return -EINVAL;
+
         /*
          * If a crashdump is active, then we are potentially in a very
          * memory constrained environment. Limit us to 1 queue and
@@ -2873,12 +2898,14 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                 return -ENOMEM;
 
         ret = -ENOMEM;
-        set->map[0].mq_map = kcalloc_node(nr_cpu_ids,
-                                          sizeof(*set->map[0].mq_map),
-                                          GFP_KERNEL, set->numa_node);
-        if (!set->map[0].mq_map)
-                goto out_free_tags;
-        set->map[0].nr_queues = set->nr_hw_queues;
+        for (i = 0; i < set->nr_maps; i++) {
+                set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
+                                                  sizeof(struct blk_mq_queue_map),
+                                                  GFP_KERNEL, set->numa_node);
+                if (!set->map[i].mq_map)
+                        goto out_free_mq_map;
+                set->map[i].nr_queues = set->nr_hw_queues;
+        }
 
         ret = blk_mq_update_queue_map(set);
         if (ret)
@@ -2894,9 +2921,10 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
         return 0;
 
 out_free_mq_map:
-        kfree(set->map[0].mq_map);
-        set->map[0].mq_map = NULL;
-out_free_tags:
+        for (i = 0; i < set->nr_maps; i++) {
+                kfree(set->map[i].mq_map);
+                set->map[i].mq_map = NULL;
+        }
         kfree(set->tags);
         set->tags = NULL;
         return ret;
@@ -2905,13 +2933,15 @@ EXPORT_SYMBOL(blk_mq_alloc_tag_set);
 
 void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 {
-        int i;
+        int i, j;
 
         for (i = 0; i < nr_cpu_ids; i++)
                 blk_mq_free_map_and_requests(set, i);
 
-        kfree(set->map[0].mq_map);
-        set->map[0].mq_map = NULL;
+        for (j = 0; j < set->nr_maps; j++) {
+                kfree(set->map[j].mq_map);
+                set->map[j].mq_map = NULL;
+        }
 
         kfree(set->tags);
         set->tags = NULL;
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -72,20 +72,37 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
  */
 extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
 
-static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
-                                                     unsigned int flags,
-                                                     unsigned int cpu)
-{
-        struct blk_mq_tag_set *set = q->tag_set;
-
-        return q->queue_hw_ctx[set->map[0].mq_map[cpu]];
-}
-
+/*
+ * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
+ * @q: request queue
+ * @hctx_type: the hctx type index
+ * @cpu: CPU
+ */
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
                                                           unsigned int hctx_type,
                                                           unsigned int cpu)
 {
-        return blk_mq_map_queue(q, hctx_type, cpu);
+        struct blk_mq_tag_set *set = q->tag_set;
+
+        return q->queue_hw_ctx[set->map[hctx_type].mq_map[cpu]];
+}
+
+/*
+ * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
+ * @q: request queue
+ * @flags: request command flags
+ * @cpu: CPU
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
+                                                     unsigned int flags,
+                                                     unsigned int cpu)
+{
+        int hctx_type = 0;
+
+        if (q->mq_ops->rq_flags_to_type)
+                hctx_type = q->mq_ops->rq_flags_to_type(q, flags);
+
+        return blk_mq_map_queue_type(q, hctx_type, cpu);
 }
 
 /*
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -85,7 +85,14 @@ enum {
 };
 
 struct blk_mq_tag_set {
+        /*
+         * map[] holds ctx -> hctx mappings, one map exists for each type
+         * that the driver wishes to support. There are no restrictions
+         * on maps being of the same size, and it's perfectly legal to
+         * share maps between types.
+         */
         struct blk_mq_queue_map map[HCTX_MAX_TYPES];
+        unsigned int            nr_maps;        /* nr entries in map[] */
         const struct blk_mq_ops *ops;
         unsigned int            nr_hw_queues;   /* nr hw queues across maps */
         unsigned int            queue_depth;    /* max hw supported */
@@ -109,6 +116,8 @@ struct blk_mq_queue_data {
 
 typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
                 const struct blk_mq_queue_data *);
+/* takes rq->cmd_flags as input, returns a hardware type index */
+typedef int (rq_flags_to_type_fn)(struct request_queue *, unsigned int);
 typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
 typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
@@ -134,6 +143,11 @@ struct blk_mq_ops {
          */
         queue_rq_fn             *queue_rq;
 
+        /*
+         * Return a queue map type for the given request/bio flags
+         */
+        rq_flags_to_type_fn     *rq_flags_to_type;
+
         /*
          * Reserve budget before queue request, once .queue_rq is
          * run, it is driver's responsibility to release the