mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-09 23:39:18 +00:00
virtio/vhost: cross endian support
I have just queued some more bugfix patches today but none fix regressions
and none are related to these ones, so it looks like a good time for a merge
for -rc1.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJVk7JOAAoJECgfDbjSjVRpHgEIAKrgLd7gIQ8lO+LCYqne6WLQ
Ky8rOUnaxX4gD5N0akhfJFr/m/yIyAfk9+ALZZUo3kfuFiEsT2rn32iK/2Gj8pcu
HFoAWhS+7b/ZsfpHRPtv/zVD3q4c3nWsWpfWK09J+4t0UJuC8fmGMoBzkS0kjZtd
dQnHlJi5+1u4ch2x9sYYeVx7GOJ8a1W0q7cWJnWdOffWLEP9/zB8fgRVLFp/7AAd
uBlza93RU81wS7q5tSUph6ESPqt2yu357e//4jnWjVx5EUXDRBL3A/T1JpC1qYSn
WV2Gv14x+LVz2G8WgGmwfMq1H9Dvd/OzNToX5R8SIRx6Rh5L6gxFQjqt4dclGj8=
=nKap
-----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio/vhost cross endian support from Michael Tsirkin:
 "I have just queued some more bugfix patches today but none fix
  regressions and none are related to these ones, so it looks like a good
  time for a merge for -rc1.

  The motivation for this is support for legacy BE guests on the new LE
  hosts.

  There are two redeeming properties that made me merge this:

   - It's a trivial amount of code: since we wrap host/guest accesses
     anyway, almost all of it is well hidden from drivers.

   - Sane platforms would never set flags like VHOST_CROSS_ENDIAN_LEGACY,
     and when it's clear, there's zero overhead (at some point it was
     tested by compiling with and without the patches, got the same
     stripped binary).

  Maybe we could create a Kconfig symbol to enforce the second point:
  prevent people from enabling it, e.g. on x86.  I will look into this"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  virtio-pci: alloc only resources actually used.
  macvtap/tun: cross-endian support for little-endian hosts
  vhost: cross-endian support for legacy devices
  virtio: add explicit big-endian support to memory accessors
  vhost: introduce vhost_is_little_endian() helper
  vringh: introduce vringh_is_little_endian() helper
  macvtap: introduce macvtap_is_little_endian() helper
  tun: add tun_is_little_endian() helper
  virtio: introduce virtio_is_little_endian() helper
commit 5fc835284d
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -258,6 +258,20 @@ config TUN
 
           If you don't know what to use this for, you don't need it.
 
+config TUN_VNET_CROSS_LE
+        bool "Support for cross-endian vnet headers on little-endian kernels"
+        default n
+        ---help---
+          This option allows TUN/TAP and MACVTAP device drivers in a
+          little-endian kernel to parse vnet headers that come from a
+          big-endian legacy virtio device.
+
+          Userspace programs can control the feature using the TUNSETVNETBE
+          and TUNGETVNETBE ioctls.
+
+          Unless you have a little-endian system hosting a big-endian virtual
+          machine with a legacy virtio NIC, you should say N.
+
 config VETH
         tristate "Virtual ethernet pair device"
         ---help---
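As a rough userspace illustration of the help text above (not part of the patch; the interface name, error handling, and surrounding setup are assumptions, only the TUNSETIFF/TUNSETVNETBE calls come from the kernel ABI), a program could put a TAP queue into big-endian vnet-header mode like this:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Hedged sketch: open a TAP queue and mark its vnet headers as big-endian.
 * "tap0" is a placeholder name. */
static int open_be_tap(void)
{
        struct ifreq ifr;
        int be = 1;
        int fd = open("/dev/net/tun", O_RDWR);

        if (fd < 0)
                return -1;

        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
        strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);

        if (ioctl(fd, TUNSETIFF, &ifr) < 0 ||
            ioctl(fd, TUNSETVNETBE, &be) < 0) { /* EINVAL: no CONFIG_TUN_VNET_CROSS_LE */
                close(fd);
                return -1;
        }
        return fd;
}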
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -48,15 +48,70 @@ struct macvtap_queue {
 
 #define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
 
 #define MACVTAP_VNET_LE 0x80000000
+#define MACVTAP_VNET_BE 0x40000000
+
+#ifdef CONFIG_TUN_VNET_CROSS_LE
+static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
+{
+        return q->flags & MACVTAP_VNET_BE ? false :
+                virtio_legacy_is_little_endian();
+}
+
+static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp)
+{
+        int s = !!(q->flags & MACVTAP_VNET_BE);
+
+        if (put_user(s, sp))
+                return -EFAULT;
+
+        return 0;
+}
+
+static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp)
+{
+        int s;
+
+        if (get_user(s, sp))
+                return -EFAULT;
+
+        if (s)
+                q->flags |= MACVTAP_VNET_BE;
+        else
+                q->flags &= ~MACVTAP_VNET_BE;
+
+        return 0;
+}
+#else
+static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
+{
+        return virtio_legacy_is_little_endian();
+}
+
+static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp)
+{
+        return -EINVAL;
+}
+
+static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp)
+{
+        return -EINVAL;
+}
+#endif /* CONFIG_TUN_VNET_CROSS_LE */
+
+static inline bool macvtap_is_little_endian(struct macvtap_queue *q)
+{
+        return q->flags & MACVTAP_VNET_LE ||
+               macvtap_legacy_is_little_endian(q);
+}
 
 static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
 {
-        return __virtio16_to_cpu(q->flags & MACVTAP_VNET_LE, val);
+        return __virtio16_to_cpu(macvtap_is_little_endian(q), val);
 }
 
 static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
 {
-        return __cpu_to_virtio16(q->flags & MACVTAP_VNET_LE, val);
+        return __cpu_to_virtio16(macvtap_is_little_endian(q), val);
 }
 
 static struct proto macvtap_proto = {
@@ -1085,6 +1140,12 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
                 q->flags &= ~MACVTAP_VNET_LE;
                 return 0;
 
+        case TUNGETVNETBE:
+                return macvtap_get_vnet_be(q, sp);
+
+        case TUNSETVNETBE:
+                return macvtap_set_vnet_be(q, sp);
+
         case TUNSETOFFLOAD:
                 /* let the user check for future flags */
                 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -111,6 +111,7 @@ do { \
 #define TUN_FASYNC      IFF_ATTACH_QUEUE
 /* High bits in flags field are unused. */
 #define TUN_VNET_LE     0x80000000
+#define TUN_VNET_BE     0x40000000
 
 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
                       IFF_MULTI_QUEUE)
@@ -205,14 +206,68 @@ struct tun_struct {
         u32 flow_count;
 };
 
+#ifdef CONFIG_TUN_VNET_CROSS_LE
+static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
+{
+        return tun->flags & TUN_VNET_BE ? false :
+                virtio_legacy_is_little_endian();
+}
+
+static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
+{
+        int be = !!(tun->flags & TUN_VNET_BE);
+
+        if (put_user(be, argp))
+                return -EFAULT;
+
+        return 0;
+}
+
+static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
+{
+        int be;
+
+        if (get_user(be, argp))
+                return -EFAULT;
+
+        if (be)
+                tun->flags |= TUN_VNET_BE;
+        else
+                tun->flags &= ~TUN_VNET_BE;
+
+        return 0;
+}
+#else
+static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
+{
+        return virtio_legacy_is_little_endian();
+}
+
+static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
+{
+        return -EINVAL;
+}
+
+static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
+{
+        return -EINVAL;
+}
+#endif /* CONFIG_TUN_VNET_CROSS_LE */
+
+static inline bool tun_is_little_endian(struct tun_struct *tun)
+{
+        return tun->flags & TUN_VNET_LE ||
+                tun_legacy_is_little_endian(tun);
+}
+
 static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
 {
-        return __virtio16_to_cpu(tun->flags & TUN_VNET_LE, val);
+        return __virtio16_to_cpu(tun_is_little_endian(tun), val);
 }
 
 static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
 {
-        return __cpu_to_virtio16(tun->flags & TUN_VNET_LE, val);
+        return __cpu_to_virtio16(tun_is_little_endian(tun), val);
 }
 
 static inline u32 tun_hashfn(u32 rxhash)
@@ -2044,6 +2099,14 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                 tun->flags &= ~TUN_VNET_LE;
                 break;
 
+        case TUNGETVNETBE:
+                ret = tun_get_vnet_be(tun, argp);
+                break;
+
+        case TUNSETVNETBE:
+                ret = tun_set_vnet_be(tun, argp);
+                break;
+
         case TUNATTACHFILTER:
                 /* Can be set only for TAPs */
                 ret = -EINVAL;
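The precedence that tun_is_little_endian() and its macvtap counterpart implement can be summarized with a stand-alone model (illustrative only, not kernel code; it assumes CONFIG_TUN_VNET_CROSS_LE=y):

#include <stdbool.h>

/* Model of tun_is_little_endian() combined with tun_legacy_is_little_endian().
 * vnet_le/vnet_be mirror the TUN_VNET_LE/TUN_VNET_BE flags set via
 * TUNSETVNETLE/TUNSETVNETBE; host_is_le mirrors virtio_legacy_is_little_endian(). */
static bool model_tun_is_little_endian(bool vnet_le, bool vnet_be, bool host_is_le)
{
        if (vnet_le)            /* modern/forced little-endian always wins */
                return true;
        if (vnet_be)            /* legacy big-endian guest requested by userspace */
                return false;
        return host_is_le;      /* default: legacy native endianness */
}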
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -32,3 +32,18 @@ config VHOST
         ---help---
           This option is selected by any driver which needs to access
           the core of vhost.
+
+config VHOST_CROSS_ENDIAN_LEGACY
+        bool "Cross-endian support for vhost"
+        default n
+        ---help---
+          This option allows vhost to support guests with a different byte
+          ordering from host while using legacy virtio.
+
+          Userspace programs can control the feature using the
+          VHOST_SET_VRING_ENDIAN and VHOST_GET_VRING_ENDIAN ioctls.
+
+          This is only useful on a few platforms (ppc64 and arm64). Since it
+          adds some overhead, it is disabled by default.
+
+          If unsure, say "N".
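For the ioctls named in the help text, a VMM on a little-endian host would request a big-endian legacy ring roughly as follows (hedged sketch; vhost_fd and the ring index come from the usual vhost setup, which is not shown). Per the vhost uapi comment later in this diff, the call must happen before the backend is started, otherwise it fails with EBUSY, and kernels built without CONFIG_VHOST_CROSS_ENDIAN_LEGACY reject it altogether:

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Sketch only: ask vhost to treat ring 0 of an already-opened vhost device
 * as big-endian legacy virtio. */
static int vhost_request_be_ring(int vhost_fd)
{
        struct vhost_vring_state state = {
                .index = 0,                      /* ring to configure */
                .num   = VHOST_VRING_BIG_ENDIAN, /* or VHOST_VRING_LITTLE_ENDIAN */
        };

        return ioctl(vhost_fd, VHOST_SET_VRING_ENDIAN, &state);
}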
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -36,6 +36,77 @@ enum {
 #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
 #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
 
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
+static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
+{
+        vq->user_be = !virtio_legacy_is_little_endian();
+}
+
+static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
+{
+        struct vhost_vring_state s;
+
+        if (vq->private_data)
+                return -EBUSY;
+
+        if (copy_from_user(&s, argp, sizeof(s)))
+                return -EFAULT;
+
+        if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
+            s.num != VHOST_VRING_BIG_ENDIAN)
+                return -EINVAL;
+
+        vq->user_be = s.num;
+
+        return 0;
+}
+
+static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+                                   int __user *argp)
+{
+        struct vhost_vring_state s = {
+                .index = idx,
+                .num = vq->user_be
+        };
+
+        if (copy_to_user(argp, &s, sizeof(s)))
+                return -EFAULT;
+
+        return 0;
+}
+
+static void vhost_init_is_le(struct vhost_virtqueue *vq)
+{
+        /* Note for legacy virtio: user_be is initialized at reset time
+         * according to the host endianness. If userspace does not set an
+         * explicit endianness, the default behavior is native endian, as
+         * expected by legacy virtio.
+         */
+        vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
+}
+#else
+static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
+{
+}
+
+static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
+{
+        return -ENOIOCTLCMD;
+}
+
+static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
+                                   int __user *argp)
+{
+        return -ENOIOCTLCMD;
+}
+
+static void vhost_init_is_le(struct vhost_virtqueue *vq)
+{
+        if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
+                vq->is_le = true;
+}
+#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
+
 static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
                             poll_table *pt)
 {
@@ -199,6 +270,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
         vq->call = NULL;
         vq->log_ctx = NULL;
         vq->memory = NULL;
+        vq->is_le = virtio_legacy_is_little_endian();
+        vhost_vq_reset_user_be(vq);
 }
 
 static int vhost_worker(void *data)
@@ -806,6 +879,12 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
                 } else
                         filep = eventfp;
                 break;
+        case VHOST_SET_VRING_ENDIAN:
+                r = vhost_set_vring_endian(vq, argp);
+                break;
+        case VHOST_GET_VRING_ENDIAN:
+                r = vhost_get_vring_endian(vq, idx, argp);
+                break;
         default:
                 r = -ENOIOCTLCMD;
         }
@@ -1044,8 +1123,12 @@ int vhost_init_used(struct vhost_virtqueue *vq)
 {
         __virtio16 last_used_idx;
         int r;
-        if (!vq->private_data)
+        if (!vq->private_data) {
+                vq->is_le = virtio_legacy_is_little_endian();
                 return 0;
+        }
+
+        vhost_init_is_le(vq);
 
         r = vhost_update_used_flags(vq);
         if (r)
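A compact way to read vhost_init_is_le() in the CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y branch above is this stand-alone model (illustrative only, not kernel code):

#include <stdbool.h>

/* has_version_1 mirrors vhost_has_feature(vq, VIRTIO_F_VERSION_1); user_be
 * mirrors vq->user_be, which VHOST_SET_VRING_ENDIAN sets and which defaults
 * to the host's own byte order at reset. */
static bool model_vhost_vq_is_le(bool has_version_1, bool user_be)
{
        /* Modern (VIRTIO_F_VERSION_1) rings are always little-endian;
         * legacy rings follow whatever endianness userspace requested. */
        return has_version_1 || !user_be;
}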
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -106,6 +106,14 @@ struct vhost_virtqueue {
         /* Log write descriptors */
         void __user *log_base;
         struct vhost_log *log;
+
+        /* Ring endianness. Defaults to legacy native endianness.
+         * Set to true when starting a modern virtio device. */
+        bool is_le;
+#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
+        /* Ring endianness requested by userspace for cross-endian support. */
+        bool user_be;
+#endif
 };
 
 struct vhost_dev {
@@ -173,34 +181,39 @@ static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
         return vq->acked_features & (1ULL << bit);
 }
 
+static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
+{
+        return vq->is_le;
+}
+
 /* Memory accessors */
 static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
 {
-        return __virtio16_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+        return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
 }
 
 static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
 {
-        return __cpu_to_virtio16(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+        return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
 }
 
 static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
 {
-        return __virtio32_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+        return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
 }
 
 static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
 {
-        return __cpu_to_virtio32(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+        return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
 }
 
 static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
 {
-        return __virtio64_to_cpu(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+        return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
 }
 
 static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
 {
-        return __cpu_to_virtio64(vhost_has_feature(vq, VIRTIO_F_VERSION_1), val);
+        return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
 }
 #endif
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -507,10 +507,6 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
         if (rc)
                 goto err_enable_device;
 
-        rc = pci_request_regions(pci_dev, "virtio-pci");
-        if (rc)
-                goto err_request_regions;
-
         if (force_legacy) {
                 rc = virtio_pci_legacy_probe(vp_dev);
                 /* Also try modern mode if we can't map BAR0 (no IO space). */
@@ -540,8 +536,6 @@ err_register:
         else
                 virtio_pci_modern_remove(vp_dev);
 err_probe:
-        pci_release_regions(pci_dev);
-err_request_regions:
         pci_disable_device(pci_dev);
 err_enable_device:
         kfree(vp_dev);
@@ -559,7 +553,6 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
         else
                 virtio_pci_modern_remove(vp_dev);
 
-        pci_release_regions(pci_dev);
         pci_disable_device(pci_dev);
 }
 
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -75,6 +75,8 @@ struct virtio_pci_device {
         /* Multiply queue_notify_off by this value. (non-legacy mode). */
         u32 notify_offset_multiplier;
 
+        int modern_bars;
+
         /* Legacy only field */
         /* the IO mapping for the PCI config space */
         void __iomem *ioaddr;
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -215,6 +215,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
 int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
 {
         struct pci_dev *pci_dev = vp_dev->pci_dev;
+        int rc;
 
         /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
         if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
@@ -226,9 +227,14 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
                 return -ENODEV;
         }
 
+        rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
+        if (rc)
+                return rc;
+
+        rc = -ENOMEM;
         vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
         if (!vp_dev->ioaddr)
-                return -ENOMEM;
+                goto err_iomap;
 
         vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;
 
@@ -246,6 +252,10 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
         vp_dev->del_vq = del_vq;
 
         return 0;
+
+err_iomap:
+        pci_release_region(pci_dev, 0);
+        return rc;
 }
 
 void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
@@ -253,4 +263,5 @@ void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
         struct pci_dev *pci_dev = vp_dev->pci_dev;
 
         pci_iounmap(pci_dev, vp_dev->ioaddr);
+        pci_release_region(pci_dev, 0);
 }
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -499,7 +499,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
  * Returns offset of the capability, or 0.
  */
 static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
-                                             u32 ioresource_types)
+                                             u32 ioresource_types, int *bars)
 {
         int pos;
 
@@ -520,8 +520,10 @@ static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
 
                 if (type == cfg_type) {
                         if (pci_resource_len(dev, bar) &&
-                            pci_resource_flags(dev, bar) & ioresource_types)
+                            pci_resource_flags(dev, bar) & ioresource_types) {
+                                *bars |= (1 << bar);
                                 return pos;
+                        }
                 }
         }
         return 0;
@@ -617,7 +619,8 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
 
         /* check for a common config: if not, use legacy mode (bar 0). */
         common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
-                                            IORESOURCE_IO | IORESOURCE_MEM);
+                                            IORESOURCE_IO | IORESOURCE_MEM,
+                                            &vp_dev->modern_bars);
         if (!common) {
                 dev_info(&pci_dev->dev,
                          "virtio_pci: leaving for legacy driver\n");
@@ -626,9 +629,11 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
 
         /* If common is there, these should be too... */
         isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
-                                         IORESOURCE_IO | IORESOURCE_MEM);
+                                         IORESOURCE_IO | IORESOURCE_MEM,
+                                         &vp_dev->modern_bars);
         notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
-                                            IORESOURCE_IO | IORESOURCE_MEM);
+                                            IORESOURCE_IO | IORESOURCE_MEM,
+                                            &vp_dev->modern_bars);
         if (!isr || !notify) {
                 dev_err(&pci_dev->dev,
                         "virtio_pci: missing capabilities %i/%i/%i\n",
@@ -640,7 +645,13 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
          * device-specific configuration.
          */
         device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
-                                            IORESOURCE_IO | IORESOURCE_MEM);
+                                            IORESOURCE_IO | IORESOURCE_MEM,
+                                            &vp_dev->modern_bars);
+
+        err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars,
+                                           "virtio-pci-modern");
+        if (err)
+                return err;
 
         err = -EINVAL;
         vp_dev->common = map_capability(pci_dev, common,
@@ -727,4 +738,5 @@ void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
         pci_iounmap(pci_dev, vp_dev->notify_base);
         pci_iounmap(pci_dev, vp_dev->isr);
         pci_iounmap(pci_dev, vp_dev->common);
+        pci_release_selected_regions(pci_dev, vp_dev->modern_bars);
 }
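Taken together, the virtio-pci hunks drop the blanket pci_request_regions() call in favour of a bitmask of the BARs that the virtio capabilities actually reference. Condensed from the hunks above (not a verbatim excerpt; error handling and the other capability lookups are trimmed), the modern probe path now behaves like this:

/* Each capability lookup records its BAR in vp_dev->modern_bars
 * (*bars |= 1 << bar inside virtio_pci_find_capability()), and only
 * those BARs are requested before mapping; the legacy probe requests
 * BAR 0 alone. */
common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
                                    IORESOURCE_IO | IORESOURCE_MEM,
                                    &vp_dev->modern_bars);
...
err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars,
                                   "virtio-pci-modern");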
--- a/include/linux/virtio_byteorder.h
+++ b/include/linux/virtio_byteorder.h
@@ -3,17 +3,21 @@
 #include <linux/types.h>
 #include <uapi/linux/virtio_types.h>
 
-/*
- * Low-level memory accessors for handling virtio in modern little endian and in
- * compatibility native endian format.
- */
+static inline bool virtio_legacy_is_little_endian(void)
+{
+#ifdef __LITTLE_ENDIAN
+        return true;
+#else
+        return false;
+#endif
+}
 
 static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
 {
         if (little_endian)
                 return le16_to_cpu((__force __le16)val);
         else
-                return (__force u16)val;
+                return be16_to_cpu((__force __be16)val);
 }
 
 static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
@@ -21,7 +25,7 @@ static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
         if (little_endian)
                 return (__force __virtio16)cpu_to_le16(val);
         else
-                return (__force __virtio16)val;
+                return (__force __virtio16)cpu_to_be16(val);
 }
 
 static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
@@ -29,7 +33,7 @@ static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
         if (little_endian)
                 return le32_to_cpu((__force __le32)val);
         else
-                return (__force u32)val;
+                return be32_to_cpu((__force __be32)val);
 }
 
 static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
@@ -37,7 +41,7 @@ static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
         if (little_endian)
                 return (__force __virtio32)cpu_to_le32(val);
         else
-                return (__force __virtio32)val;
+                return (__force __virtio32)cpu_to_be32(val);
 }
 
 static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
@@ -45,7 +49,7 @@ static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
         if (little_endian)
                 return le64_to_cpu((__force __le64)val);
         else
-                return (__force u64)val;
+                return be64_to_cpu((__force __be64)val);
 }
 
 static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
@@ -53,7 +57,7 @@ static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
         if (little_endian)
                 return (__force __virtio64)cpu_to_le64(val);
         else
-                return (__force __virtio64)val;
+                return (__force __virtio64)cpu_to_be64(val);
 }
 
 #endif /* _LINUX_VIRTIO_BYTEORDER */
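The behavioural change in these accessors is that a false little_endian argument now means "big-endian" rather than "pass through in native byte order". A short kernel-style illustration (not from the patch; the function name is made up):

#include <linux/virtio_byteorder.h>

/* Illustrative only: a 16-bit field written by a big-endian legacy guest is
 * now decoded correctly even on a little-endian host, because the accessor
 * byte-swaps from BE instead of returning the raw value. */
static u16 example_decode_be16(__virtio16 wire_val_from_be_guest)
{
        return __virtio16_to_cpu(false, wire_val_from_be_guest);
}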
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -205,35 +205,41 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
         return 0;
 }
 
+static inline bool virtio_is_little_endian(struct virtio_device *vdev)
+{
+        return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
+                virtio_legacy_is_little_endian();
+}
+
 /* Memory accessors */
 static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
 {
-        return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+        return __virtio16_to_cpu(virtio_is_little_endian(vdev), val);
 }
 
 static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
 {
-        return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+        return __cpu_to_virtio16(virtio_is_little_endian(vdev), val);
 }
 
 static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
 {
-        return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+        return __virtio32_to_cpu(virtio_is_little_endian(vdev), val);
 }
 
 static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
 {
-        return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+        return __cpu_to_virtio32(virtio_is_little_endian(vdev), val);
 }
 
 static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
 {
-        return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+        return __virtio64_to_cpu(virtio_is_little_endian(vdev), val);
 }
 
 static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
 {
-        return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
+        return __cpu_to_virtio64(virtio_is_little_endian(vdev), val);
 }
 
 /* Config space accessors. */
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -226,33 +226,39 @@ static inline void vringh_notify(struct vringh *vrh)
         vrh->notify(vrh);
 }
 
+static inline bool vringh_is_little_endian(const struct vringh *vrh)
+{
+        return vrh->little_endian ||
+                virtio_legacy_is_little_endian();
+}
+
 static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
 {
-        return __virtio16_to_cpu(vrh->little_endian, val);
+        return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
 }
 
 static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
 {
-        return __cpu_to_virtio16(vrh->little_endian, val);
+        return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
 }
 
 static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
 {
-        return __virtio32_to_cpu(vrh->little_endian, val);
+        return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
 }
 
 static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
 {
-        return __cpu_to_virtio32(vrh->little_endian, val);
+        return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
 }
 
 static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
 {
-        return __virtio64_to_cpu(vrh->little_endian, val);
+        return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
 }
 
 static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
 {
-        return __cpu_to_virtio64(vrh->little_endian, val);
+        return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
 }
 #endif /* _LINUX_VRINGH_H */
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -50,6 +50,12 @@
 #define TUNGETFILTER _IOR('T', 219, struct sock_fprog)
 #define TUNSETVNETLE _IOW('T', 220, int)
 #define TUNGETVNETLE _IOR('T', 221, int)
+/* The TUNSETVNETBE and TUNGETVNETBE ioctls are for cross-endian support on
+ * little-endian hosts. Not all kernel configurations support them, but all
+ * configurations that support SET also support GET.
+ */
+#define TUNSETVNETBE _IOW('T', 222, int)
+#define TUNGETVNETBE _IOR('T', 223, int)
 
 /* TUNSETIFF ifr flags */
 #define IFF_TUN         0x0001
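Since kernels built without CONFIG_TUN_VNET_CROSS_LE reject these ioctls with EINVAL (see the tun/macvtap stubs earlier in this diff), userspace can probe for cross-endian support. A hedged sketch, assuming fd is an already-configured TUN/TAP queue:

#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/if_tun.h>

/* Sketch only: report whether this kernel supports the TUN*VNETBE ioctls. */
static bool tap_supports_vnet_be(int fd)
{
        int be;

        /* Fails with EINVAL when CONFIG_TUN_VNET_CROSS_LE is not set;
         * succeeds and reports the current setting otherwise. */
        return ioctl(fd, TUNGETVNETBE, &be) == 0;
}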
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -103,6 +103,20 @@ struct vhost_memory {
 /* Get accessor: reads index, writes value in num */
 #define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
 
+/* Set the vring byte order in num. Valid values are VHOST_VRING_LITTLE_ENDIAN
+ * or VHOST_VRING_BIG_ENDIAN (other values return -EINVAL).
+ * The byte order cannot be changed while the device is active: trying to do so
+ * returns -EBUSY.
+ * This is a legacy only API that is simply ignored when VIRTIO_F_VERSION_1 is
+ * set.
+ * Not all kernel configurations support this ioctl, but all configurations that
+ * support SET also support GET.
+ */
+#define VHOST_VRING_LITTLE_ENDIAN 0
+#define VHOST_VRING_BIG_ENDIAN 1
+#define VHOST_SET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x13, struct vhost_vring_state)
+#define VHOST_GET_VRING_ENDIAN _IOW(VHOST_VIRTIO, 0x14, struct vhost_vring_state)
+
 /* The following ioctls use eventfd file descriptors to signal and poll
  * for events. */