mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-29 17:22:07 +00:00
2b30fab613
blk_mq_pci_map_queues and blk_mq_virtio_map_queues will create a CPU to hardware queue mapping based on affinity information. These two functions share common code and only differ in how the affinity information is retrieved. Also, those functions are located in the block subsystem where they don't really fit in. They are virtio and pci subsystem specific. Thus provide a generic mapping function which uses the irq_get_affinity callback from bus_type. Original idea from Ming Lei <ming.lei@redhat.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Hannes Reinecke <hare@suse.de> Reviewed-by: Ming Lei <ming.lei@redhat.com> Reviewed-by: John Garry <john.g.garry@oracle.com> Signed-off-by: Daniel Wagner <wagi@kernel.org> Link: https://lore.kernel.org/r/20241202-refactor-blk-affinity-helpers-v6-4-27211e9c2cd5@kernel.org Signed-off-by: Jens Axboe <axboe@kernel.dk>
94 lines
2.2 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/group_cpus.h>
#include <linux/device/bus.h>

#include "blk.h"
#include "blk-mq.h"
void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
|
|
{
|
|
const struct cpumask *masks;
|
|
unsigned int queue, cpu;
|
|
|
|
masks = group_cpus_evenly(qmap->nr_queues);
|
|
if (!masks) {
|
|
for_each_possible_cpu(cpu)
|
|
qmap->mq_map[cpu] = qmap->queue_offset;
|
|
return;
|
|
}
|
|
|
|
for (queue = 0; queue < qmap->nr_queues; queue++) {
|
|
for_each_cpu(cpu, &masks[queue])
|
|
qmap->mq_map[cpu] = qmap->queue_offset + queue;
|
|
}
|
|
kfree(masks);
|
|
}
|
|
EXPORT_SYMBOL_GPL(blk_mq_map_queues);
|
|
|
|
/**
|
|
* blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
|
|
* @qmap: CPU to hardware queue map.
|
|
* @index: hardware queue index.
|
|
*
|
|
* We have no quick way of doing reverse lookups. This is only used at
|
|
* queue init time, so runtime isn't important.
|
|
*/
|
|
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
|
|
{
|
|
int i;
|
|
|
|
for_each_possible_cpu(i) {
|
|
if (index == qmap->mq_map[i])
|
|
return cpu_to_node(i);
|
|
}
|
|
|
|
return NUMA_NO_NODE;
|
|
}
|
|
|
|
/**
|
|
* blk_mq_map_hw_queues - Create CPU to hardware queue mapping
|
|
* @qmap: CPU to hardware queue map
|
|
* @dev: The device to map queues
|
|
* @offset: Queue offset to use for the device
|
|
*
|
|
* Create a CPU to hardware queue mapping in @qmap. The struct bus_type
|
|
* irq_get_affinity callback will be used to retrieve the affinity.
|
|
*/
|
|
void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
|
|
struct device *dev, unsigned int offset)
|
|
|
|
{
|
|
const struct cpumask *mask;
|
|
unsigned int queue, cpu;
|
|
|
|
if (!dev->bus->irq_get_affinity)
|
|
goto fallback;
|
|
|
|
for (queue = 0; queue < qmap->nr_queues; queue++) {
|
|
mask = dev->bus->irq_get_affinity(dev, queue + offset);
|
|
if (!mask)
|
|
goto fallback;
|
|
|
|
for_each_cpu(cpu, mask)
|
|
qmap->mq_map[cpu] = qmap->queue_offset + queue;
|
|
}
|
|
|
|
return;
|
|
|
|
fallback:
|
|
WARN_ON_ONCE(qmap->nr_queues > 1);
|
|
blk_mq_clear_mq_map(qmap);
|
|
}
|
|
EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues);
|