Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-19 14:56:21 +00:00)
Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git
commit c9b838ed26
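
This merge brings in four related series, all visible in the hunks below: the remoteproc rename of struct rproc_vring's len field to num (the field counts vring descriptors; it is not a byte length), the new vdpa .suspend operation with its vdpa_sim implementation and vhost-vdpa uAPI (VHOST_VDPA_SUSPEND, VHOST_BACKEND_F_SUSPEND), a vringh rework so copy_from_iotlb()/copy_to_iotlb() handle translations that need more bio_vec slots than one iotlb_translate() call can return, and a small virtio_ring cleanup dropping the unused vq argument of vring_alloc_desc_extra().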
@@ -334,7 +334,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
         size_t size;
 
         /* actual size of vring (in bytes) */
-        size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
+        size = PAGE_ALIGN(vring_size(rvring->num, rvring->align));
 
         rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
 
@@ -401,7 +401,7 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
                 return -EINVAL;
         }
 
-        rvring->len = vring->num;
+        rvring->num = vring->num;
         rvring->align = vring->align;
         rvring->rvdev = rvdev;
 
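These two hunks are the core of the rename: rvring->len always held the number of vring descriptors (vring->num from the firmware resource table), not a byte length, so calling it num removes the ambiguity with the byte size that vring_size() derives from it. As a rough illustration, here is a userspace sketch of the split-ring sizing formula from the virtio spec; the function name and the constants are illustrative, this is not the kernel's vring_size() itself:

#include <stdio.h>
#include <stddef.h>

/* Sketch of the split virtqueue footprint: "num" is a descriptor count. */
static size_t vring_bytes(unsigned int num, size_t align)
{
        size_t sz;

        sz = 16 * num;              /* descriptor table, 16 bytes each */
        sz += 2 * (3 + num);        /* avail: flags, idx, ring[], used_event */
        sz = (sz + align - 1) & ~(align - 1);
        sz += 2 * 3 + 8 * num;      /* used: flags, idx, ring[], avail_event */
        return sz;
}

int main(void)
{
        /* e.g. 256 descriptors with 4096-byte alignment */
        printf("%zu bytes\n", vring_bytes(256, 4096));
        return 0;
}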
@@ -87,7 +87,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
         struct fw_rsc_vdev *rsc;
         struct virtqueue *vq;
         void *addr;
-        int len, size;
+        int num, size;
 
         /* we're temporarily limited to two virtqueues per rvdev */
         if (id >= ARRAY_SIZE(rvdev->vring))
@@ -104,20 +104,20 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
 
         rvring = &rvdev->vring[id];
         addr = mem->va;
-        len = rvring->len;
+        num = rvring->num;
 
         /* zero vring */
-        size = vring_size(len, rvring->align);
+        size = vring_size(num, rvring->align);
         memset(addr, 0, size);
 
         dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
-                id, addr, len, rvring->notifyid);
+                id, addr, num, rvring->notifyid);
 
         /*
          * Create the new vq, and tell virtio we're not interested in
          * the 'weak' smp barriers, since we're talking with a real device.
          */
-        vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx,
+        vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
                                  addr, rproc_virtio_notify, callback, name);
         if (!vq) {
                 dev_err(dev, "vring_new_virtqueue %s failed\n", name);
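For context on the unchanged false argument in the call above: it is vring_new_virtqueue()'s weak_barriers flag, and leaving it false makes the virtio core use full memory barriers rather than the lighter virt_*() SMP variants, which matters here because, as the comment says, the peer is a real remote processor rather than a hypervisor.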
@@ -107,6 +107,7 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
         for (i = 0; i < vdpasim->dev_attr.nas; i++)
                 vhost_iotlb_reset(&vdpasim->iommu[i]);
 
+        vdpasim->running = true;
         spin_unlock(&vdpasim->iommu_lock);
 
         vdpasim->features = 0;
@@ -505,6 +506,24 @@ static int vdpasim_reset(struct vdpa_device *vdpa)
         return 0;
 }
 
+static int vdpasim_suspend(struct vdpa_device *vdpa)
+{
+        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+        int i;
+
+        spin_lock(&vdpasim->lock);
+        vdpasim->running = false;
+        if (vdpasim->running) {
+                /* Check for missed buffers */
+                for (i = 0; i < vdpasim->dev_attr.nvqs; ++i)
+                        vdpasim_kick_vq(vdpa, i);
+
+        }
+        spin_unlock(&vdpasim->lock);
+
+        return 0;
+}
+
 static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
 {
         struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -694,6 +713,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
         .get_status = vdpasim_get_status,
         .set_status = vdpasim_set_status,
         .reset = vdpasim_reset,
+        .suspend = vdpasim_suspend,
         .get_config_size = vdpasim_get_config_size,
         .get_config = vdpasim_get_config,
         .set_config = vdpasim_set_config,
@@ -726,6 +746,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
         .get_status = vdpasim_get_status,
         .set_status = vdpasim_set_status,
         .reset = vdpasim_reset,
+        .suspend = vdpasim_suspend,
         .get_config_size = vdpasim_get_config_size,
         .get_config = vdpasim_get_config,
         .set_config = vdpasim_set_config,
@@ -66,6 +66,7 @@ struct vdpasim {
         u32 generation;
         u64 features;
         u32 groups;
+        bool running;
         /* spinlock to synchronize iommu table */
         spinlock_t iommu_lock;
 };
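Taken together, the simulator hunks give the new suspend op its simplest possible semantics: vdpasim_do_reset() marks the device running, vdpasim_suspend() clears the flag under the lock, and the blk/net work handlers in the next two hunks bail out while it is clear, so a suspended device stops its datapath but keeps all state. (Note that, as committed, the if (vdpasim->running) test sits right after the assignment that cleared the flag, so the "missed buffers" kick loop can never execute.)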
@@ -204,6 +204,9 @@ static void vdpasim_blk_work(struct work_struct *work)
         if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
                 goto out;
 
+        if (!vdpasim->running)
+                goto out;
+
         for (i = 0; i < VDPASIM_BLK_VQ_NUM; i++) {
                 struct vdpasim_virtqueue *vq = &vdpasim->vqs[i];
 
@@ -154,6 +154,9 @@ static void vdpasim_net_work(struct work_struct *work)
 
         spin_lock(&vdpasim->lock);
 
+        if (!vdpasim->running)
+                goto out;
+
         if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
                 goto out;
 
@@ -347,6 +347,14 @@ static long vhost_vdpa_set_config(struct vhost_vdpa *v,
         return 0;
 }
 
+static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
+{
+        struct vdpa_device *vdpa = v->vdpa;
+        const struct vdpa_config_ops *ops = vdpa->config;
+
+        return ops->suspend;
+}
+
 static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
 {
         struct vdpa_device *vdpa = v->vdpa;
@@ -470,6 +478,22 @@ static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
         return 0;
 }
 
+/* After a successful return of ioctl the device must not process more
+ * virtqueue descriptors. The device can answer to read or writes of config
+ * fields as if it were not suspended. In particular, writing to "queue_enable"
+ * with a value of 1 will not make the device start processing buffers.
+ */
+static long vhost_vdpa_suspend(struct vhost_vdpa *v)
+{
+        struct vdpa_device *vdpa = v->vdpa;
+        const struct vdpa_config_ops *ops = vdpa->config;
+
+        if (!ops->suspend)
+                return -EOPNOTSUPP;
+
+        return ops->suspend(vdpa);
+}
+
 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                                    void __user *argp)
 {
@@ -577,7 +601,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
         if (cmd == VHOST_SET_BACKEND_FEATURES) {
                 if (copy_from_user(&features, featurep, sizeof(features)))
                         return -EFAULT;
-                if (features & ~VHOST_VDPA_BACKEND_FEATURES)
+                if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
+                                 BIT_ULL(VHOST_BACKEND_F_SUSPEND)))
                         return -EOPNOTSUPP;
+                if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
+                    !vhost_vdpa_can_suspend(v))
+                        return -EOPNOTSUPP;
                 vhost_set_backend_features(&v->vdev, features);
                 return 0;
@@ -628,6 +656,8 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
                 break;
         case VHOST_GET_BACKEND_FEATURES:
                 features = VHOST_VDPA_BACKEND_FEATURES;
+                if (vhost_vdpa_can_suspend(v))
+                        features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
                 if (copy_to_user(featurep, &features, sizeof(features)))
                         r = -EFAULT;
                 break;
@@ -640,6 +670,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
         case VHOST_VDPA_GET_VQS_COUNT:
                 r = vhost_vdpa_get_vqs_count(v, argp);
                 break;
+        case VHOST_VDPA_SUSPEND:
+                r = vhost_vdpa_suspend(v);
+                break;
         default:
                 r = vhost_dev_ioctl(&v->vdev, cmd, argp);
                 if (r == -ENOIOCTLCMD)
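The vhost side only advertises and accepts VHOST_BACKEND_F_SUSPEND when the parent vdpa driver actually implements the new .suspend op (vhost_vdpa_can_suspend()), and the VHOST_VDPA_SUSPEND ioctl itself fails with -EOPNOTSUPP otherwise; a hedged userspace sketch of this handshake follows the uapi hunk near the end of this diff.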
@@ -1095,7 +1095,8 @@ EXPORT_SYMBOL(vringh_need_notify_kern);
 #if IS_REACHABLE(CONFIG_VHOST_IOTLB)
 
 static int iotlb_translate(const struct vringh *vrh,
-                           u64 addr, u64 len, struct bio_vec iov[],
+                           u64 addr, u64 len, u64 *translated,
+                           struct bio_vec iov[],
                            int iov_size, u32 perm)
 {
         struct vhost_iotlb_map *map;
@@ -1136,43 +1137,76 @@ static int iotlb_translate(const struct vringh *vrh,
 
         spin_unlock(vrh->iotlb_lock);
 
+        if (translated)
+                *translated = min(len, s);
+
         return ret;
 }
 
 static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
                                   void *src, size_t len)
 {
-        struct iov_iter iter;
-        struct bio_vec iov[16];
-        int ret;
+        u64 total_translated = 0;
 
-        ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
-                              len, iov, 16, VHOST_MAP_RO);
-        if (ret < 0)
-                return ret;
+        while (total_translated < len) {
+                struct bio_vec iov[16];
+                struct iov_iter iter;
+                u64 translated;
+                int ret;
 
-        iov_iter_bvec(&iter, READ, iov, ret, len);
+                ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
+                                      len - total_translated, &translated,
+                                      iov, ARRAY_SIZE(iov), VHOST_MAP_RO);
+                if (ret == -ENOBUFS)
+                        ret = ARRAY_SIZE(iov);
+                else if (ret < 0)
+                        return ret;
 
-        ret = copy_from_iter(dst, len, &iter);
+                iov_iter_bvec(&iter, READ, iov, ret, translated);
 
-        return ret;
+                ret = copy_from_iter(dst, translated, &iter);
+                if (ret < 0)
+                        return ret;
+
+                src += translated;
+                dst += translated;
+                total_translated += translated;
+        }
+
+        return total_translated;
 }
 
 static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
                                 void *src, size_t len)
 {
-        struct iov_iter iter;
-        struct bio_vec iov[16];
-        int ret;
+        u64 total_translated = 0;
 
-        ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
-                              len, iov, 16, VHOST_MAP_WO);
-        if (ret < 0)
-                return ret;
+        while (total_translated < len) {
+                struct bio_vec iov[16];
+                struct iov_iter iter;
+                u64 translated;
+                int ret;
 
-        iov_iter_bvec(&iter, WRITE, iov, ret, len);
+                ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
+                                      len - total_translated, &translated,
+                                      iov, ARRAY_SIZE(iov), VHOST_MAP_WO);
+                if (ret == -ENOBUFS)
+                        ret = ARRAY_SIZE(iov);
+                else if (ret < 0)
+                        return ret;
 
-        return copy_to_iter(src, len, &iter);
+                iov_iter_bvec(&iter, WRITE, iov, ret, translated);
+
+                ret = copy_to_iter(src, translated, &iter);
+                if (ret < 0)
+                        return ret;
+
+                src += translated;
+                dst += translated;
+                total_translated += translated;
+        }
+
+        return total_translated;
 }
 
 static inline int getu16_iotlb(const struct vringh *vrh,
@@ -1183,7 +1217,7 @@ static inline int getu16_iotlb(const struct vringh *vrh,
         int ret;
 
         /* Atomic read is needed for getu16 */
-        ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+        ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), NULL,
                               &iov, 1, VHOST_MAP_RO);
         if (ret < 0)
                 return ret;
@@ -1204,7 +1238,7 @@ static inline int putu16_iotlb(const struct vringh *vrh,
         int ret;
 
         /* Atomic write is needed for putu16 */
-        ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+        ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), NULL,
                               &iov, 1, VHOST_MAP_WO);
         if (ret < 0)
                 return ret;
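The rework above fixes copy_from_iotlb()/copy_to_iotlb() for buffers whose IOTLB translation does not fit in one pass: iotlb_translate() now reports how many bytes the returned bio_vec array actually covers, -ENOBUFS means "iov array full but contents valid", and the callers loop until the whole length is consumed. Below is a minimal userspace model of that loop; translate() is a hypothetical stand-in for iotlb_translate() (illustrative names, not kernel API), implemented here as a toy identity mapping in 8-byte pieces so one call can only ever cover part of a large request:

#include <stddef.h>
#include <string.h>
#include <sys/types.h>

struct chunk {
        void   *base;
        size_t  len;
};

/* Toy stand-in: map up to 'len' bytes at 'src' into at most 'cap' chunks,
 * set *covered to the bytes spanned, return the chunk count. Real code
 * would consult a translation table and could fail with a negative error. */
static int translate(const char *src, size_t len, size_t *covered,
                     struct chunk *iov, int cap)
{
        int n = 0;

        *covered = 0;
        while (len && n < cap) {
                size_t piece = len < 8 ? len : 8;

                iov[n].base = (void *)src;
                iov[n].len = piece;
                src += piece;
                len -= piece;
                *covered += piece;
                n++;
        }
        return n;
}

static ssize_t copy_chunked(char *dst, const char *src, size_t len)
{
        size_t total = 0;

        while (total < len) {
                struct chunk iov[16];
                size_t covered;
                int i, n;

                n = translate(src + total, len - total, &covered, iov, 16);
                if (n < 0)
                        return n;       /* hard translation failure */

                /* consume exactly what this round covered, then retry */
                for (i = 0; i < n; i++) {
                        memcpy(dst + total, iov[i].base, iov[i].len);
                        total += iov[i].len;
                }
        }

        return total;
}

int main(void)
{
        char src[1000], dst[1000];

        memset(src, 0x5a, sizeof(src));
        return copy_chunked(dst, src, sizeof(src)) == sizeof(src) &&
               memcmp(dst, src, sizeof(src)) == 0 ? 0 : 1;
}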
@@ -1637,8 +1637,7 @@ static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
         return NULL;
 }
 
-static struct vring_desc_extra *vring_alloc_desc_extra(struct vring_virtqueue *vq,
-                                                       unsigned int num)
+static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
 {
         struct vring_desc_extra *desc_extra;
         unsigned int i;
@@ -1759,7 +1758,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
         /* Put everything in free lists. */
         vq->free_head = 0;
 
-        vq->packed.desc_extra = vring_alloc_desc_extra(vq, num);
+        vq->packed.desc_extra = vring_alloc_desc_extra(num);
         if (!vq->packed.desc_extra)
                 goto err_desc_extra;
 
@@ -2248,7 +2247,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
         if (!vq->split.desc_state)
                 goto err_state;
 
-        vq->split.desc_extra = vring_alloc_desc_extra(vq, vring.num);
+        vq->split.desc_extra = vring_alloc_desc_extra(vring.num);
         if (!vq->split.desc_extra)
                 goto err_extra;
 
@@ -597,7 +597,7 @@ struct rproc_subdev {
 /**
  * struct rproc_vring - remoteproc vring state
  * @va: virtual address
- * @len: length, in bytes
+ * @num: vring size
  * @da: device address
  * @align: vring alignment
  * @notifyid: rproc-specific unique vring index
@@ -606,7 +606,7 @@ struct rproc_subdev {
  */
 struct rproc_vring {
         void *va;
-        int len;
+        int num;
         u32 da;
         u32 align;
         int notifyid;
@@ -218,6 +218,9 @@ struct vdpa_map_file {
  * @reset:                      Reset device
  *                              @vdev: vdpa device
  *                              Returns integer: success (0) or error (< 0)
+ * @suspend:                    Suspend or resume the device (optional)
+ *                              @vdev: vdpa device
+ *                              Returns integer: success (0) or error (< 0)
  * @get_config_size:            Get the size of the configuration space includes
  *                              fields that are conditional on feature bits.
  *                              @vdev: vdpa device
@@ -319,6 +322,7 @@ struct vdpa_config_ops {
         u8 (*get_status)(struct vdpa_device *vdev);
         void (*set_status)(struct vdpa_device *vdev, u8 status);
         int (*reset)(struct vdpa_device *vdev);
+        int (*suspend)(struct vdpa_device *vdev);
         size_t (*get_config_size)(struct vdpa_device *vdev);
         void (*get_config)(struct vdpa_device *vdev, unsigned int offset,
                            void *buf, unsigned int len);
@@ -171,4 +171,18 @@
 #define VHOST_VDPA_SET_GROUP_ASID       _IOW(VHOST_VIRTIO, 0x7C, \
                                              struct vhost_vring_state)
 
+/* Suspend or resume a device so it does not process virtqueue requests anymore
+ *
+ * After the return of ioctl with suspend != 0, the device must finish any
+ * pending operations like in flight requests. It must also preserve all the
+ * necessary state (the virtqueue vring base plus the possible device specific
+ * states) that is required for restoring in the future. The device must not
+ * change its configuration after that point.
+ *
+ * After the return of ioctl with suspend == 0, the device can continue
+ * processing buffers as long as typical conditions are met (vq is enabled,
+ * DRIVER_OK status bit is enabled, etc).
+ */
+#define VHOST_VDPA_SUSPEND              _IOW(VHOST_VIRTIO, 0x7D, int)
+
 #endif
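To make the comment above concrete, here is a minimal userspace sketch of the suspend handshake against a vhost-vdpa character device. The device path is an assumption, error handling is abbreviated, and it relies on uapi headers carrying the definitions added in this series; note this version of the ioctl passes an int (suspend != 0 suspends), per the hunk above:

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

int main(void)
{
        uint64_t features;
        int suspend = 1;        /* != 0 suspends in this API version */
        int fd = open("/dev/vhost-vdpa-0", O_RDWR);     /* assumed node */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* VHOST_BACKEND_F_SUSPEND is only reported when the parent vdpa
         * driver implements the new .suspend op. */
        if (ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features) < 0 ||
            !(features & (1ULL << VHOST_BACKEND_F_SUSPEND))) {
                fprintf(stderr, "suspend not supported\n");
                return 1;
        }

        /* Acknowledge the backend features we just read back. */
        if (ioctl(fd, VHOST_SET_BACKEND_FEATURES, &features) < 0) {
                perror("VHOST_SET_BACKEND_FEATURES");
                return 1;
        }

        /* Stop the datapath; config reads/writes still behave normally. */
        if (ioctl(fd, VHOST_VDPA_SUSPEND, &suspend) < 0) {
                perror("VHOST_VDPA_SUSPEND");
                return 1;
        }

        close(fd);
        return 0;
}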
@@ -161,5 +161,7 @@ struct vhost_vdpa_iova_range {
  * message
  */
 #define VHOST_BACKEND_F_IOTLB_ASID  0x3
+/* Device can be suspended */
+#define VHOST_BACKEND_F_SUSPEND  0x4
 
 #endif