Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-29 09:12:07 +00:00
Merge branch 'for-6.14/block' into for-next
* for-6.14/block:
  blk-mq: remove unused queue mapping helpers
  virtio: blk/scsi: replace blk_mq_virtio_map_queues with blk_mq_map_hw_queues
  nvme: replace blk_mq_pci_map_queues with blk_mq_map_hw_queues
  scsi: replace blk_mq_pci_map_queues with blk_mq_map_hw_queues
  blk-mq: introduce blk_mq_map_hw_queues
  virtio: hookup irq_get_affinity callback
  PCI: hookup irq_get_affinity callback
  driver core: bus: add irq_get_affinity callback to bus_type
commit 7b8e44457b
@@ -27,8 +27,6 @@ bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
 obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
 
 obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
-obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
-obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o
 obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o
 obj-$(CONFIG_BLK_WBT) += blk-wbt.o
 obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
@@ -11,6 +11,7 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/group_cpus.h>
+#include <linux/device/bus.h>
 
 #include "blk.h"
 #include "blk-mq.h"
@@ -54,3 +55,39 @@ int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
 
 	return NUMA_NO_NODE;
 }
+
+/**
+ * blk_mq_map_hw_queues - Create CPU to hardware queue mapping
+ * @qmap: CPU to hardware queue map
+ * @dev: The device to map queues
+ * @offset: Queue offset to use for the device
+ *
+ * Create a CPU to hardware queue mapping in @qmap. The struct bus_type
+ * irq_get_affinity callback will be used to retrieve the affinity.
+ */
+void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
+			  struct device *dev, unsigned int offset)
+
+{
+	const struct cpumask *mask;
+	unsigned int queue, cpu;
+
+	if (!dev->bus->irq_get_affinity)
+		goto fallback;
+
+	for (queue = 0; queue < qmap->nr_queues; queue++) {
+		mask = dev->bus->irq_get_affinity(dev, queue + offset);
+		if (!mask)
+			goto fallback;
+
+		for_each_cpu(cpu, mask)
+			qmap->mq_map[cpu] = qmap->queue_offset + queue;
+	}
+
+	return;
+
+fallback:
+	WARN_ON_ONCE(qmap->nr_queues > 1);
+	blk_mq_clear_mq_map(qmap);
+}
+EXPORT_SYMBOL_GPL(blk_mq_map_hw_queues);
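The helper above is the driver-facing replacement for both blk_mq_pci_map_queues() and blk_mq_virtio_map_queues(): a driver passes its generic struct device plus an IRQ-vector offset and the bus resolves the affinity. A minimal sketch of a converted .map_queues callback, assuming a hypothetical PCI driver whose tag-set driver_data points at a private foo_ctrl (the foo_* names and the reserved-vector count are illustrative, not part of this series):

#include <linux/blk-mq.h>
#include <linux/pci.h>

struct foo_ctrl {
	struct pci_dev *pdev;		/* hypothetical driver state */
};

#define FOO_RESERVED_VECS	1	/* vector 0 assumed reserved for admin work */

static void foo_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_ctrl *ctrl = set->driver_data;

	/*
	 * For each hardware queue the helper asks the bus for the affinity
	 * of vector (queue + offset); on a PCI bus this resolves through
	 * pci_irq_get_affinity(), matching what blk_mq_pci_map_queues() did.
	 */
	blk_mq_map_hw_queues(&set->map[HCTX_TYPE_DEFAULT],
			     &ctrl->pdev->dev, FOO_RESERVED_VECS);
}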
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2016 Christoph Hellwig.
- */
-#include <linux/kobject.h>
-#include <linux/blkdev.h>
-#include <linux/blk-mq-pci.h>
-#include <linux/pci.h>
-#include <linux/module.h>
-
-#include "blk-mq.h"
-
-/**
- * blk_mq_pci_map_queues - provide a default queue mapping for PCI device
- * @qmap: CPU to hardware queue map.
- * @pdev: PCI device associated with @set.
- * @offset: Offset to use for the pci irq vector
- *
- * This function assumes the PCI device @pdev has at least as many available
- * interrupt vectors as @set has queues. It will then query the vector
- * corresponding to each queue for it's affinity mask and built queue mapping
- * that maps a queue to the CPUs that have irq affinity for the corresponding
- * vector.
- */
-void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
-			   int offset)
-{
-	const struct cpumask *mask;
-	unsigned int queue, cpu;
-
-	for (queue = 0; queue < qmap->nr_queues; queue++) {
-		mask = pci_irq_get_affinity(pdev, queue + offset);
-		if (!mask)
-			goto fallback;
-
-		for_each_cpu(cpu, mask)
-			qmap->mq_map[cpu] = qmap->queue_offset + queue;
-	}
-
-	return;
-
-fallback:
-	WARN_ON_ONCE(qmap->nr_queues > 1);
-	blk_mq_clear_mq_map(qmap);
-}
-EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2016 Christoph Hellwig.
- */
-#include <linux/device.h>
-#include <linux/blk-mq-virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/module.h>
-#include "blk-mq.h"
-
-/**
- * blk_mq_virtio_map_queues - provide a default queue mapping for virtio device
- * @qmap: CPU to hardware queue map.
- * @vdev: virtio device to provide a mapping for.
- * @first_vec: first interrupt vectors to use for queues (usually 0)
- *
- * This function assumes the virtio device @vdev has at least as many available
- * interrupt vectors as @set has queues. It will then query the vector
- * corresponding to each queue for it's affinity mask and built queue mapping
- * that maps a queue to the CPUs that have irq affinity for the corresponding
- * vector.
- */
-void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
-		struct virtio_device *vdev, int first_vec)
-{
-	const struct cpumask *mask;
-	unsigned int queue, cpu;
-
-	if (!vdev->config->get_vq_affinity)
-		goto fallback;
-
-	for (queue = 0; queue < qmap->nr_queues; queue++) {
-		mask = vdev->config->get_vq_affinity(vdev, first_vec + queue);
-		if (!mask)
-			goto fallback;
-
-		for_each_cpu(cpu, mask)
-			qmap->mq_map[cpu] = qmap->queue_offset + queue;
-	}
-
-	return;
-
-fallback:
-	blk_mq_map_queues(qmap);
-}
-EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
@@ -13,7 +13,6 @@
 #include <linux/string_helpers.h>
 #include <linux/idr.h>
 #include <linux/blk-mq.h>
-#include <linux/blk-mq-virtio.h>
 #include <linux/numa.h>
 #include <linux/vmalloc.h>
 #include <uapi/linux/virtio_ring.h>
@@ -1181,7 +1180,8 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set)
 		if (i == HCTX_TYPE_POLL)
 			blk_mq_map_queues(&set->map[i]);
 		else
-			blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
+			blk_mq_map_hw_queues(&set->map[i],
+					     &vblk->vdev->dev, 0);
 	}
 }
 
@@ -16,7 +16,6 @@
 #include <linux/nvme-fc.h>
 #include "fc.h"
 #include <scsi/scsi_transport_fc.h>
-#include <linux/blk-mq-pci.h>
 
 /* *************************** Data Structures/Defines ****************** */
 
@@ -8,7 +8,6 @@
 #include <linux/async.h>
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
 #include <linux/blk-integrity.h>
 #include <linux/dmi.h>
 #include <linux/init.h>
@@ -463,7 +462,7 @@ static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
 		 */
 		map->queue_offset = qoff;
 		if (i != HCTX_TYPE_POLL && offset)
-			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
+			blk_mq_map_hw_queues(map, dev->dev, offset);
 		else
 			blk_mq_map_queues(map);
 		qoff += map->nr_queues;
@@ -1670,6 +1670,19 @@ static void pci_dma_cleanup(struct device *dev)
 	iommu_device_unuse_default_domain(dev);
 }
 
+/*
+ * pci_device_irq_get_affinity - get IRQ affinity mask for device
+ * @dev: ptr to dev structure
+ * @irq_vec: interrupt vector number
+ *
+ * Return the CPU affinity mask for @dev and @irq_vec.
+ */
+static const struct cpumask *pci_device_irq_get_affinity(struct device *dev,
+							  unsigned int irq_vec)
+{
+	return pci_irq_get_affinity(to_pci_dev(dev), irq_vec);
+}
+
 const struct bus_type pci_bus_type = {
 	.name = "pci",
 	.match = pci_bus_match,
@@ -1677,6 +1690,7 @@ const struct bus_type pci_bus_type = {
 	.probe = pci_device_probe,
 	.remove = pci_device_remove,
 	.shutdown = pci_device_shutdown,
+	.irq_get_affinity = pci_device_irq_get_affinity,
 	.dev_groups = pci_dev_groups,
 	.bus_groups = pci_bus_groups,
 	.drv_groups = pci_drv_groups,
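The PCI hookup above is what keeps behaviour unchanged for the NVMe and SCSI conversions that follow: dev->bus->irq_get_affinity() on a PCI device simply forwards to pci_irq_get_affinity(). A short sketch of resolving a managed vector's affinity through the bus callback rather than the PCI helper directly (foo_vector_affinity is a hypothetical wrapper for illustration; block drivers normally go through blk_mq_map_hw_queues()):

#include <linux/device.h>
#include <linux/pci.h>

/* Returns NULL when the bus cannot report an affinity for @vec. */
static const struct cpumask *foo_vector_affinity(struct pci_dev *pdev,
						 unsigned int vec)
{
	struct device *dev = &pdev->dev;

	if (!dev->bus->irq_get_affinity)
		return NULL;

	/* For pci_bus_type this ends up in pci_irq_get_affinity(pdev, vec). */
	return dev->bus->irq_get_affinity(dev, vec);
}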
@@ -16,7 +16,6 @@
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/if_ether.h>
-#include <linux/blk-mq-pci.h>
 #include <scsi/fc/fc_fip.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport.h>
@@ -601,7 +600,7 @@ void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
 		return;
 	}
 
-	blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET);
+	blk_mq_map_hw_queues(qmap, &l_pdev->dev, FNIC_PCI_OFFSET);
 }
 
 static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -9,7 +9,6 @@
 
 #include <linux/acpi.h>
 #include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
 #include <linux/clk.h>
 #include <linux/debugfs.h>
 #include <linux/dmapool.h>
@@ -3328,7 +3328,7 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost)
 		if (i == HCTX_TYPE_POLL)
 			blk_mq_map_queues(qmap);
 		else
-			blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
+			blk_mq_map_hw_queues(qmap, hisi_hba->dev,
 					     BASE_VECTORS_V3_HW);
 		qoff += qmap->nr_queues;
 	}
@@ -37,7 +37,6 @@
 #include <linux/poll.h>
 #include <linux/vmalloc.h>
 #include <linux/irq_poll.h>
-#include <linux/blk-mq-pci.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -3193,7 +3192,7 @@ static void megasas_map_queues(struct Scsi_Host *shost)
 	map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 	map->nr_queues = instance->msix_vectors - offset;
 	map->queue_offset = 0;
-	blk_mq_pci_map_queues(map, instance->pdev, offset);
+	blk_mq_map_hw_queues(map, &instance->pdev->dev, offset);
 	qoff += map->nr_queues;
 	offset += map->nr_queues;
 
@@ -12,7 +12,6 @@
 
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
 #include <linux/delay.h>
 #include <linux/dmapool.h>
 #include <linux/errno.h>
@@ -4042,7 +4042,7 @@ static void mpi3mr_map_queues(struct Scsi_Host *shost)
 		 */
 		map->queue_offset = qoff;
 		if (i != HCTX_TYPE_POLL)
-			blk_mq_pci_map_queues(map, mrioc->pdev, offset);
+			blk_mq_map_hw_queues(map, &mrioc->pdev->dev, offset);
 		else
 			blk_mq_map_queues(map);
 
@@ -53,7 +53,6 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/raid_class.h>
-#include <linux/blk-mq-pci.h>
 #include <linux/unaligned.h>
 
 #include "mpt3sas_base.h"
@@ -11890,7 +11889,7 @@ static void scsih_map_queues(struct Scsi_Host *shost)
 		 */
 		map->queue_offset = qoff;
 		if (i != HCTX_TYPE_POLL)
-			blk_mq_pci_map_queues(map, ioc->pdev, offset);
+			blk_mq_map_hw_queues(map, &ioc->pdev->dev, offset);
 		else
 			blk_mq_map_queues(map);
 
@@ -105,7 +105,7 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
 	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
 
 	if (pm8001_ha->number_of_intr > 1) {
-		blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+		blk_mq_map_hw_queues(qmap, &pm8001_ha->pdev->dev, 1);
 		return;
 	}
 
@@ -56,7 +56,6 @@
 #include <scsi/sas_ata.h>
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
 #include "pm8001_defs.h"
 
 #define DRV_NAME "pm80xx"
@@ -8,7 +8,6 @@
 #include <linux/delay.h>
 #include <linux/nvme.h>
 #include <linux/nvme-fc.h>
-#include <linux/blk-mq-pci.h>
 #include <linux/blk-mq.h>
 
 static struct nvme_fc_port_template qla_nvme_fc_transport;
@@ -841,7 +840,7 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
 {
 	struct scsi_qla_host *vha = lport->private;
 
-	blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
+	blk_mq_map_hw_queues(map, &vha->hw->pdev->dev, vha->irq_offset);
 }
 
 static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
@@ -13,7 +13,6 @@
 #include <linux/mutex.h>
 #include <linux/kobject.h>
 #include <linux/slab.h>
-#include <linux/blk-mq-pci.h>
 #include <linux/refcount.h>
 #include <linux/crash_dump.h>
 #include <linux/trace_events.h>
@@ -8071,7 +8070,8 @@ static void qla2xxx_map_queues(struct Scsi_Host *shost)
 	if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
 		blk_mq_map_queues(qmap);
 	else
-		blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
+		blk_mq_map_hw_queues(qmap, &vha->hw->pdev->dev,
+				     vha->irq_offset);
 }
 
 struct scsi_host_template qla2xxx_driver_template = {
@@ -19,7 +19,6 @@
 #include <linux/bcd.h>
 #include <linux/reboot.h>
 #include <linux/cciss_ioctl.h>
-#include <linux/blk-mq-pci.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -6547,10 +6546,10 @@ static void pqi_map_queues(struct Scsi_Host *shost)
 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 
 	if (!ctrl_info->disable_managed_interrupts)
-		return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
-					      ctrl_info->pci_dev, 0);
+		blk_mq_map_hw_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+				     &ctrl_info->pci_dev->dev, 0);
 	else
-		return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
+		blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
 }
 
 static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
@@ -29,7 +29,6 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_devinfo.h>
 #include <linux/seqlock.h>
-#include <linux/blk-mq-virtio.h>
 
 #include "sd.h"
 
@@ -746,7 +745,7 @@ static void virtscsi_map_queues(struct Scsi_Host *shost)
 		if (i == HCTX_TYPE_POLL)
 			blk_mq_map_queues(map);
 		else
-			blk_mq_virtio_map_queues(map, vscsi->vdev, 2);
+			blk_mq_map_hw_queues(map, &vscsi->vdev->dev, 2);
 	}
 }
 
@@ -377,6 +377,24 @@ static void virtio_dev_remove(struct device *_d)
 	of_node_put(dev->dev.of_node);
 }
 
+/*
+ * virtio_irq_get_affinity - get IRQ affinity mask for device
+ * @_d: ptr to dev structure
+ * @irq_vec: interrupt vector number
+ *
+ * Return the CPU affinity mask for @_d and @irq_vec.
+ */
+static const struct cpumask *virtio_irq_get_affinity(struct device *_d,
+						     unsigned int irq_vec)
+{
+	struct virtio_device *dev = dev_to_virtio(_d);
+
+	if (!dev->config->get_vq_affinity)
+		return NULL;
+
+	return dev->config->get_vq_affinity(dev, irq_vec);
+}
+
 static const struct bus_type virtio_bus = {
 	.name = "virtio",
 	.match = virtio_dev_match,
@@ -384,6 +402,7 @@ static const struct bus_type virtio_bus = {
 	.uevent = virtio_uevent,
 	.probe = virtio_dev_probe,
 	.remove = virtio_dev_remove,
+	.irq_get_affinity = virtio_irq_get_affinity,
 };
 
 int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
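On the virtio bus the callback above delegates to the transport's get_vq_affinity config op, so a transport without it makes blk_mq_map_hw_queues() take its fallback path: blk_mq_clear_mq_map(), with a one-time warning only when more than one hardware queue was requested. A sketch of what that means for a converted single-queue virtio driver; the bar_* name and the use of driver_data are assumptions for illustration:

#include <linux/blk-mq.h>
#include <linux/virtio.h>

static void bar_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_device *vdev = set->driver_data;	/* hypothetical */

	/*
	 * If the transport lacks get_vq_affinity(), virtio_irq_get_affinity()
	 * returns NULL and the helper clears the map; with a single hardware
	 * queue at offset 0 that still maps every CPU to queue 0.
	 */
	blk_mq_map_hw_queues(&set->map[HCTX_TYPE_DEFAULT], &vdev->dev, 0);
}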
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BLK_MQ_PCI_H
-#define _LINUX_BLK_MQ_PCI_H
-
-struct blk_mq_queue_map;
-struct pci_dev;
-
-void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
-			   int offset);
-
-#endif /* _LINUX_BLK_MQ_PCI_H */
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BLK_MQ_VIRTIO_H
-#define _LINUX_BLK_MQ_VIRTIO_H
-
-struct blk_mq_queue_map;
-struct virtio_device;
-
-void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
-		struct virtio_device *vdev, int first_vec);
-
-#endif /* _LINUX_BLK_MQ_VIRTIO_H */
@@ -921,6 +921,8 @@ void blk_mq_unfreeze_queue_non_owner(struct request_queue *q);
 void blk_freeze_queue_start_non_owner(struct request_queue *q);
 
 void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
+void blk_mq_map_hw_queues(struct blk_mq_queue_map *qmap,
+			  struct device *dev, unsigned int offset);
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
 void blk_mq_quiesce_queue_nowait(struct request_queue *q);
@@ -48,6 +48,7 @@ struct fwnode_handle;
 * will never get called until they do.
 * @remove: Called when a device removed from this bus.
 * @shutdown: Called at shut-down time to quiesce the device.
+* @irq_get_affinity: Get IRQ affinity mask for the device on this bus.
 *
 * @online: Called to put the device back online (after offlining it).
 * @offline: Called to put the device offline for hot-removal. May fail.
@@ -87,6 +88,8 @@ struct bus_type {
 	void (*sync_state)(struct device *dev);
 	void (*remove)(struct device *dev);
 	void (*shutdown)(struct device *dev);
+	const struct cpumask *(*irq_get_affinity)(struct device *dev,
+						  unsigned int irq_vec);
 
 	int (*online)(struct device *dev);
 	int (*offline)(struct device *dev);
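The bus_type callback is the piece that makes the block helper bus-agnostic: any bus that knows which CPUs its interrupt vectors are affine to can expose that without the block layer learning about the bus. A minimal sketch of a hypothetical bus wiring it up (foo_bus_*, foo_device and its per-vector mask array are assumptions, not part of this series):

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/device/bus.h>

struct foo_device {
	struct device dev;
	const struct cpumask **vec_masks;	/* hypothetical per-vector masks */
	unsigned int nr_vecs;
};

#define to_foo_device(d) container_of(d, struct foo_device, dev)

static const struct cpumask *foo_bus_irq_get_affinity(struct device *dev,
						      unsigned int irq_vec)
{
	struct foo_device *fdev = to_foo_device(dev);

	/* Returning NULL makes blk_mq_map_hw_queues() take its fallback path. */
	if (irq_vec >= fdev->nr_vecs)
		return NULL;

	return fdev->vec_masks[irq_vec];
}

static const struct bus_type foo_bus_type = {
	.name			= "foo",
	.irq_get_affinity	= foo_bus_irq_get_affinity,
};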