blk-mq-rdma: remove queue mapping helper for rdma devices
No rdma device exposes its irq vectors affinity today. So the only
mapping that we have left is the default blk_mq_map_queues, which we
fall back to anyway. Also fix up the only consumer of this helper
(nvme-rdma). Remove this now dead code.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Acked-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit edde9e70bb
parent 015ad2b1e4
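For context on why the helper is dead code: ib_get_vector_affinity() only returns a mask when the RDMA device driver implements the get_vector_affinity op, and no upstream driver does, so the helper removed below always took its blk_mq_map_queues() fallback. Roughly, the inline helper in include/rdma/ib_verbs.h looks like this (a sketch from memory, not part of this patch):

/* With no driver implementing ->get_vector_affinity, this returns NULL
 * for every vector, so blk_mq_rdma_map_queues() always fell back to the
 * default mapping. */
static inline const struct cpumask *
ib_get_vector_affinity(struct ib_device *device, int comp_vector)
{
	if (comp_vector < 0 || comp_vector >= device->num_comp_vectors ||
	    !device->ops.get_vector_affinity)
		return NULL;

	return device->ops.get_vector_affinity(device, comp_vector);
}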
block/Kconfig
@@ -215,11 +215,6 @@ config BLK_MQ_VIRTIO
 	depends on VIRTIO
 	default y
 
-config BLK_MQ_RDMA
-	bool
-	depends on INFINIBAND
-	default y
-
 config BLK_PM
 	def_bool PM
 
block/Makefile
@@ -30,7 +30,6 @@ obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o
 obj-$(CONFIG_BLK_DEV_INTEGRITY_T10)	+= t10-pi.o
 obj-$(CONFIG_BLK_MQ_PCI)	+= blk-mq-pci.o
 obj-$(CONFIG_BLK_MQ_VIRTIO)	+= blk-mq-virtio.o
-obj-$(CONFIG_BLK_MQ_RDMA)	+= blk-mq-rdma.o
 obj-$(CONFIG_BLK_DEV_ZONED)	+= blk-zoned.o
 obj-$(CONFIG_BLK_WBT)		+= blk-wbt.o
 obj-$(CONFIG_BLK_DEBUG_FS)	+= blk-mq-debugfs.o
block/blk-mq-rdma.c (deleted)
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2017 Sagi Grimberg.
- */
-#include <linux/blk-mq.h>
-#include <linux/blk-mq-rdma.h>
-#include <rdma/ib_verbs.h>
-
-/**
- * blk_mq_rdma_map_queues - provide a default queue mapping for rdma device
- * @map:	CPU to hardware queue map.
- * @dev:	rdma device to provide a mapping for.
- * @first_vec:	first interrupt vector to use for queues (usually 0)
- *
- * This function assumes the rdma device @dev has at least as many available
- * interrupt vectors as @set has queues. It will then query its affinity mask
- * and build a queue mapping that maps a queue to the CPUs that have irq affinity
- * for the corresponding vector.
- *
- * In case either the driver passed a @dev with fewer vectors than
- * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
- * vector, we fall back to the naive mapping.
- */
-void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
-		struct ib_device *dev, int first_vec)
-{
-	const struct cpumask *mask;
-	unsigned int queue, cpu;
-
-	for (queue = 0; queue < map->nr_queues; queue++) {
-		mask = ib_get_vector_affinity(dev, first_vec + queue);
-		if (!mask)
-			goto fallback;
-
-		for_each_cpu(cpu, mask)
-			map->mq_map[cpu] = map->queue_offset + queue;
-	}
-
-	return;
-
-fallback:
-	blk_mq_map_queues(map);
-}
-EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
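What the fallback actually does: blk_mq_map_queues() spreads all possible CPUs across the map's hardware queues without consulting any device IRQ affinity. A minimal conceptual sketch of that naive spread is below; the function name is hypothetical, and the real implementation in block/blk-mq-cpumap.c additionally tries to keep topologically close CPUs on the same hardware queue.

/* Hypothetical sketch of a naive CPU-to-queue spread, illustrating the
 * fallback behaviour only; not the block layer's actual code. */
static void naive_map_queues(struct blk_mq_queue_map *map,
		unsigned int nr_cpus)
{
	unsigned int cpu;

	/* Round-robin CPUs onto the map's hardware queues. */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		map->mq_map[cpu] = map->queue_offset + cpu % map->nr_queues;
}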
drivers/nvme/host/rdma.c
@@ -12,7 +12,6 @@
 #include <linux/string.h>
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
-#include <linux/blk-mq-rdma.h>
 #include <linux/blk-integrity.h>
 #include <linux/types.h>
 #include <linux/list.h>
@@ -2159,10 +2158,8 @@ static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
 		set->map[HCTX_TYPE_READ].queue_offset = 0;
 	}
-	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
-			ctrl->device->dev, 0);
-	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
-			ctrl->device->dev, 0);
+	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
 
 	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
 		/* map dedicated poll queues only if we have queues left */
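After this fixup the nvme-rdma mapping callback no longer needs the ib_device at all. An abridged sketch of how nvme_rdma_map_queues() reads after the change (the queue-count bookkeeping is elided; treat the surrounding details as an approximation rather than the exact upstream source):

/* Abridged sketch of nvme_rdma_map_queues() after this change. */
static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	/* ... set nr_queues and queue_offset for the DEFAULT and READ maps ... */

	/* Both maps now use the generic CPU-to-queue spreading. */
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		/* ... set nr_queues and queue_offset for the POLL map, then ... */
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}
}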
include/linux/blk-mq-rdma.h (deleted)
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BLK_MQ_RDMA_H
-#define _LINUX_BLK_MQ_RDMA_H
-
-struct blk_mq_tag_set;
-struct ib_device;
-
-void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
-		struct ib_device *dev, int first_vec);
-
-#endif /* _LINUX_BLK_MQ_RDMA_H */