2019-05-22 07:51:24 +00:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
2014-12-08 14:39:45 +00:00
|
|
|
#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
|
|
|
|
#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
|
2014-12-07 16:41:16 +00:00
|
|
|
/*
|
2014-12-08 10:31:02 +00:00
|
|
|
* Virtio PCI driver - APIs for common functionality for all device versions
|
2014-12-07 16:41:16 +00:00
|
|
|
*
|
|
|
|
* This module allows virtio devices to be used over a virtual PCI device.
|
|
|
|
* This can be used with QEMU based VMMs like KVM or Xen.
|
|
|
|
*
|
|
|
|
* Copyright IBM Corp. 2007
|
2014-12-08 10:31:02 +00:00
|
|
|
* Copyright Red Hat, Inc. 2014
|
2014-12-07 16:41:16 +00:00
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Anthony Liguori <aliguori@us.ibm.com>
|
2014-12-08 10:31:02 +00:00
|
|
|
* Rusty Russell <rusty@rustcorp.com.au>
|
|
|
|
* Michael S. Tsirkin <mst@redhat.com>
|
2014-12-07 16:41:16 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/virtio.h>
|
|
|
|
#include <linux/virtio_config.h>
|
|
|
|
#include <linux/virtio_ring.h>
|
|
|
|
#include <linux/virtio_pci.h>
|
2021-10-29 09:14:42 +00:00
|
|
|
#include <linux/virtio_pci_legacy.h>
|
2021-01-04 06:55:00 +00:00
|
|
|
#include <linux/virtio_pci_modern.h>
|
2014-12-07 16:41:16 +00:00
|
|
|
#include <linux/highmem.h>
|
|
|
|
#include <linux/spinlock.h>
|
2023-12-19 09:32:41 +00:00
|
|
|
#include <linux/mutex.h>
|
2014-12-07 16:41:16 +00:00
|
|
|
|
2017-04-04 18:44:44 +00:00
|
|
|
/* Per-virtqueue bookkeeping kept by the PCI transport. */
struct virtio_pci_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues or slow_virtqueues list */
	struct list_head node;

	/* MSI-X vector (or none) */
	unsigned int msix_vector;
};
|
|
|
|
|
2023-12-19 09:32:40 +00:00
|
|
|
/* State for the device's admin (administration) virtqueue. */
struct virtio_pci_admin_vq {
	/* Virtqueue info associated with this admin queue. */
	struct virtio_pci_vq_info *info;
	/* Protects virtqueue access. */
	spinlock_t lock;
	/* Bitmap (BIT_ULL of VIRTIO_ADMIN_CMD_*) of admin commands the
	 * device supports; cf. VIRTIO_ADMIN_CMD_BITMAP below.
	 */
	u64 supported_cmds;
	/* Bitmap of supported device capabilities — presumably the result
	 * of VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY; confirm against callers.
	 */
	u64 supported_caps;
	/* Maximum number of device-parts resource objects — see the
	 * VIRTIO_ADMIN_CMD_RESOURCE_OBJ_* commands below.
	 */
	u8 max_dev_parts_objects;
	/* Id allocator for device-parts resource objects. */
	struct ida dev_parts_ida;
	/* Name of the admin queue: avq.$vq_index.
	 * 10 bytes fit "avq." + up to 5 digits of a u16 + NUL exactly.
	 */
	char name[10];
	u16 vq_index;
};
|
|
|
|
|
2021-01-04 06:54:46 +00:00
|
|
|
/* Our device structure */
struct virtio_pci_device {
	struct virtio_device vdev;
	struct pci_dev *pci_dev;
	/* A device is either legacy or modern, never both, so the two
	 * per-version states are overlaid in a union to save space;
	 * is_legacy below says which member is valid.
	 */
	union {
		struct virtio_pci_legacy_device ldev;
		struct virtio_pci_modern_device mdev;
	};
	bool is_legacy;

	/* Where to read and clear interrupt */
	u8 __iomem *isr;

	/* Lists of queues and potentially slow path queues
	 * so we can dispatch IRQs.
	 */
	spinlock_t lock;
	struct list_head virtqueues;
	struct list_head slow_virtqueues;

	/* Array of all virtqueues reported in the
	 * PCI common config num_queues field
	 */
	struct virtio_pci_vq_info **vqs;

	struct virtio_pci_admin_vq admin_vq;

	/* MSI-X support */
	int msix_enabled;
	int intx_enabled;
	/* One affinity mask per allocated MSI-X vector. */
	cpumask_var_t *msix_affinity_masks;
	/* Name strings for interrupts. This size should be enough,
	 * and I'm too lazy to allocate each name separately. */
	char (*msix_names)[256];
	/* Number of available vectors */
	unsigned int msix_vectors;
	/* Vectors allocated, excluding per-vq vectors if any */
	unsigned int msix_used_vectors;

	/* Whether we have vector per vq */
	bool per_vq_vectors;

	/* Version-specific (legacy vs. modern) virtqueue setup/teardown
	 * hooks, filled in by the respective probe routine.
	 */
	struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
				      struct virtio_pci_vq_info *info,
				      unsigned int idx,
				      void (*callback)(struct virtqueue *vq),
				      const char *name,
				      bool ctx,
				      u16 msix_vec);
	void (*del_vq)(struct virtio_pci_vq_info *info);

	/* Set the MSI-X vector used for config change notifications. */
	u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
	/* Fill *index and *num with the admin virtqueue's index/count. */
	int (*avq_index)(struct virtio_device *vdev, u16 *index, u16 *num);
};
|
|
|
|
|
2017-04-04 18:15:41 +00:00
|
|
|
/* Constants for MSI-X */
/* Use first vector for configuration changes, second and the rest for
 * virtqueues. Thus, we need at least 2 vectors for MSI. */
enum {
	VP_MSIX_CONFIG_VECTOR = 0,
	VP_MSIX_VQ_VECTOR = 1,
};
|
|
|
|
|
2014-12-07 16:41:16 +00:00
|
|
|
/* Convert a generic virtio device to our structure */
|
|
|
|
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
|
|
|
|
{
|
|
|
|
return container_of(vdev, struct virtio_pci_device, vdev);
|
|
|
|
}
|
|
|
|
|
2022-03-23 03:15:24 +00:00
|
|
|
/* wait for pending irq handlers */
|
|
|
|
void vp_synchronize_vectors(struct virtio_device *vdev);
|
2014-12-07 16:41:16 +00:00
|
|
|
/* the notify function used when creating a virt queue */
|
|
|
|
bool vp_notify(struct virtqueue *vq);
|
|
|
|
/* the config->del_vqs() implementation */
|
|
|
|
void vp_del_vqs(struct virtio_device *vdev);
|
|
|
|
/* the config->find_vqs() implementation */
|
2022-04-18 02:54:35 +00:00
|
|
|
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
|
2024-07-08 07:48:00 +00:00
|
|
|
struct virtqueue *vqs[], struct virtqueue_info vqs_info[],
|
2017-03-06 16:32:29 +00:00
|
|
|
struct irq_affinity *desc);
|
2014-12-07 16:41:16 +00:00
|
|
|
const char *vp_bus_name(struct virtio_device *vdev);
|
|
|
|
|
|
|
|
/* Setup the affinity for a virtqueue:
|
|
|
|
* - force the affinity for per vq vector
|
|
|
|
* - OR over all affinities for shared MSI
|
|
|
|
* - ignore the affinity request if we're using INTX
|
|
|
|
*/
|
2018-08-10 01:18:28 +00:00
|
|
|
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask);
|
2014-12-07 16:41:16 +00:00
|
|
|
|
2017-02-05 17:15:23 +00:00
|
|
|
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index);
|
|
|
|
|
2015-01-15 14:06:26 +00:00
|
|
|
#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
|
2015-01-13 09:23:32 +00:00
|
|
|
int virtio_pci_legacy_probe(struct virtio_pci_device *);
|
|
|
|
void virtio_pci_legacy_remove(struct virtio_pci_device *);
|
2015-01-15 14:06:26 +00:00
|
|
|
#else
|
|
|
|
/* Stub used when CONFIG_VIRTIO_PCI_LEGACY is disabled: always fails. */
static inline int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
	return -ENODEV;	/* legacy transport support is compiled out */
}
|
|
|
|
static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
	/* No-op: legacy transport support is compiled out. */
}
|
|
|
|
#endif
|
2014-12-11 11:59:51 +00:00
|
|
|
int virtio_pci_modern_probe(struct virtio_pci_device *);
|
|
|
|
void virtio_pci_modern_remove(struct virtio_pci_device *);
|
2014-12-07 16:41:16 +00:00
|
|
|
|
2023-12-19 09:32:44 +00:00
|
|
|
struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev);
|
|
|
|
|
2023-12-19 09:32:43 +00:00
|
|
|
/* Bitmap of the admin commands that tunnel legacy config-space accesses
 * and notify-area queries on behalf of a legacy guest driver.
 */
#define VIRTIO_LEGACY_ADMIN_CMD_BITMAP \
	(BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_WRITE) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_COMMON_CFG_READ) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_WRITE) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO))
|
|
|
|
|
2024-11-13 11:51:56 +00:00
|
|
|
/* Bitmap of the admin commands used for capability negotiation and
 * device-parts (resource object / device state) management.
 */
#define VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP \
	(BIT_ULL(VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_DRIVER_CAP_SET) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_DEVICE_CAP_GET) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_GET) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_DEV_PARTS_SET) | \
	 BIT_ULL(VIRTIO_ADMIN_CMD_DEV_MODE_SET))
|
2024-11-13 11:51:56 +00:00
|
|
|
|
2023-12-19 09:32:43 +00:00
|
|
|
/* Unlike modern drivers which support hardware virtio devices, legacy drivers
 * assume software-based devices: e.g. they don't use proper memory barriers
 * on ARM, use big endian on PPC, etc. X86 drivers are mostly ok though, more
 * or less by chance. For now, only support legacy IO on X86.
 */
#ifdef CONFIG_VIRTIO_PCI_ADMIN_LEGACY
/* Legacy tunnelling compiled in: include the legacy command group too. */
#define VIRTIO_ADMIN_CMD_BITMAP (VIRTIO_LEGACY_ADMIN_CMD_BITMAP | \
				 VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP)
#else
#define VIRTIO_ADMIN_CMD_BITMAP VIRTIO_DEV_PARTS_ADMIN_CMD_BITMAP
#endif
|
|
|
|
|
2024-10-24 13:54:06 +00:00
|
|
|
bool vp_is_avq(struct virtio_device *vdev, unsigned int index);
|
2024-07-16 11:35:51 +00:00
|
|
|
void vp_modern_avq_done(struct virtqueue *vq);
|
2023-12-19 09:32:41 +00:00
|
|
|
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
|
|
|
|
struct virtio_admin_cmd *cmd);
|
|
|
|
|
2014-12-07 16:41:16 +00:00
|
|
|
#endif
|