Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2024-12-29 17:22:07 +00:00)
virtio: features, fixes
virtio-mem
doorbell mapping for vdpa
config interrupt support in ifc
fixes all over the place

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----
iQFDBAABCAAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAl7fZ6APHG1zdEByZWRo
YXQuY29tAAoJECgfDbjSjVRpkDoIAMcBcQx5su1iuX7vT35xzUWZO478eAf1jOMZ
7KxKUVBeztkcxVFUlRVRu9MR6wOzwHils+1HD6025775Smr5M6x3aJxR6xOORaBj
RoU6OVGkpDvbzsxlhW+xhONz4O7/RkveKJPCwzGjqHrsFeh92lkfTqroz/EuNpw+
LZsO0+DhdUf123HbwHQp5lxW8EjyrRabgeZZg/D9VLPhoCP88vCjRhBXU2GPuaUl
/UNXsQafn4xUgrxPaoN5f4Phn/P46NNrbZ1jmlkw/z/3QhF/DhktGXGaZsIHDCN/
vicUii0or5QLeBsZpMbKko/BIe2xWHxFjkMRhMOMZOfcBb6sMBI=
=auUa
-----END PGP SIGNATURE-----

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost

Pull virtio updates from Michael Tsirkin:

 - virtio-mem: paravirtualized memory hotplug

 - support doorbell mapping for vdpa

 - config interrupt support in ifc

 - fixes all over the place

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (40 commits)
  vhost/test: fix up after API change
  virtio_mem: convert device block size into 64bit
  virtio-mem: drop unnecessary initialization
  ifcvf: implement config interrupt in IFCVF
  vhost: replace -1 with VHOST_FILE_UNBIND in ioctls
  vhost_vdpa: Support config interrupt in vdpa
  ifcvf: ignore continuous setting same status value
  virtio-mem: Don't rely on implicit compiler padding for requests
  virtio-mem: Try to unplug the complete online memory block first
  virtio-mem: Use -ETXTBSY as error code if the device is busy
  virtio-mem: Unplug subblocks right-to-left
  virtio-mem: Drop manual check for already present memory
  virtio-mem: Add parent resource for all added "System RAM"
  virtio-mem: Better retry handling
  virtio-mem: Offline and remove completely unplugged memory blocks
  mm/memory_hotplug: Introduce offline_and_remove_memory()
  virtio-mem: Allow to offline partially unplugged memory blocks
  mm: Allow to offline unmovable PageOffline() pages via MEM_GOING_OFFLINE
  virtio-mem: Paravirtualized memory hotunplug part 2
  virtio-mem: Paravirtualized memory hotunplug part 1
  ...
This commit is contained in:
commit 09102704c6

MAINTAINERS: 18 lines changed
MAINTAINERS

@@ -18114,9 +18114,18 @@ F: drivers/virtio/
F:      include/linux/vdpa.h
F:      include/linux/virtio*.h
F:      include/uapi/linux/virtio_*.h
F:      mm/balloon_compaction.c
F:      tools/virtio/

VIRTIO BALLOON
M:      "Michael S. Tsirkin" <mst@redhat.com>
M:      David Hildenbrand <david@redhat.com>
L:      virtualization@lists.linux-foundation.org
S:      Maintained
F:      drivers/virtio/virtio_balloon.c
F:      include/uapi/linux/virtio_balloon.h
F:      include/linux/balloon_compaction.h
F:      mm/balloon_compaction.c

VIRTIO CRYPTO DRIVER
M:      Gonglei <arei.gonglei@huawei.com>
L:      virtualization@lists.linux-foundation.org

@@ -18182,6 +18191,13 @@ S: Maintained
F:      drivers/iommu/virtio-iommu.c
F:      include/uapi/linux/virtio_iommu.h

VIRTIO MEM DRIVER
M:      David Hildenbrand <david@redhat.com>
L:      virtualization@lists.linux-foundation.org
S:      Maintained
F:      drivers/virtio/virtio_mem.c
F:      include/uapi/linux/virtio_mem.h

VIRTUAL BOX GUEST DEVICE DRIVER
M:      Hans de Goede <hdegoede@redhat.com>
M:      Arnd Bergmann <arnd@arndb.de>
@@ -35,6 +35,7 @@ int pxm_to_node(int pxm)
 		return NUMA_NO_NODE;
 	return pxm_to_node_map[pxm];
 }
+EXPORT_SYMBOL(pxm_to_node);

 int node_to_pxm(int node)
 {
@@ -350,13 +350,18 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
 	int err;
 	unsigned long flags;
 	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
-	int i;
 	u64 dst_len;
 	unsigned int num_out = 0, num_in = 0;
 	int sg_total;
 	uint8_t *iv;
+	struct scatterlist *sg;

 	src_nents = sg_nents_for_len(req->src, req->cryptlen);
+	if (src_nents < 0) {
+		pr_err("Invalid number of src SG.\n");
+		return src_nents;
+	}
+
 	dst_nents = sg_nents(req->dst);

 	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
@@ -402,6 +407,7 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
 		goto free;
 	}

+	dst_len = min_t(unsigned int, req->cryptlen, dst_len);
 	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
 		 req->cryptlen, dst_len);

@@ -442,12 +448,12 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
 	vc_sym_req->iv = iv;

 	/* Source data */
-	for (i = 0; i < src_nents; i++)
-		sgs[num_out++] = &req->src[i];
+	for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
+		sgs[num_out++] = sg;

 	/* Destination data */
-	for (i = 0; i < dst_nents; i++)
-		sgs[num_out + num_in++] = &req->dst[i];
+	for (sg = req->dst; sg; sg = sg_next(sg))
+		sgs[num_out + num_in++] = sg;

 	/* Status */
 	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
@@ -577,10 +583,11 @@ static void virtio_crypto_skcipher_finalize_req(
 		scatterwalk_map_and_copy(req->iv, req->dst,
 					 req->cryptlen - AES_BLOCK_SIZE,
 					 AES_BLOCK_SIZE, 0);
-	crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
-					 req, err);
 	kzfree(vc_sym_req->iv);
 	virtcrypto_clear_request(&vc_sym_req->base);
+
+	crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
+					 req, err);
 }

 static struct virtio_crypto_algo virtio_crypto_algs[] = { {
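The loop rewrite above is the heart of the scatterlist fix: chained scatterlists are not one flat array, so indexing &req->src[i] can run past the end of the first chunk, while sg_next() follows the chain links. A minimal illustrative helper showing the safe pattern (fill_sg_table() is a hypothetical name, not part of the patch):

#include <linux/scatterlist.h>

/*
 * Collect up to @nents entries of a possibly chained scatterlist into
 * the flat @sgs pointer table used for virtqueue submission. Walking
 * with sg_next() is mandatory: with sg_chain()ed tables, sgl[i] is only
 * valid within a single chunk.
 */
static void fill_sg_table(struct scatterlist *sgl, int nents,
                          struct scatterlist **sgs, unsigned int *num)
{
        struct scatterlist *sg;

        for (sg = sgl; sg && nents; sg = sg_next(sg), nents--)
                sgs[(*num)++] = sg;
}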
@@ -116,7 +116,7 @@ config MIC_COSM

 config VOP
 	tristate "VOP Driver"
-	depends on VOP_BUS && VHOST_DPN
+	depends on VOP_BUS
 	select VHOST_RING
 	select VIRTIO
 	help
@@ -50,7 +50,7 @@ config CAIF_HSI

 config CAIF_VIRTIO
 	tristate "CAIF virtio transport driver"
-	depends on CAIF && HAS_DMA && VHOST_DPN
+	depends on CAIF && HAS_DMA
 	select VHOST_RING
 	select VIRTIO
 	select GENERIC_ALLOCATOR
@@ -10,7 +10,7 @@ if VDPA

 config VDPA_SIM
 	tristate "vDPA device simulator"
-	depends on RUNTIME_TESTING_MENU && HAS_DMA && VHOST_DPN
+	depends on RUNTIME_TESTING_MENU && HAS_DMA
 	select VHOST_RING
 	default n
 	help
@@ -185,6 +185,9 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)

 void ifcvf_reset(struct ifcvf_hw *hw)
 {
+	hw->config_cb.callback = NULL;
+	hw->config_cb.private = NULL;
+
 	ifcvf_set_status(hw, 0);
 	/* flush set_status, make sure VF is stopped, reset */
 	ifcvf_get_status(hw);
@@ -27,6 +27,7 @@
	((1ULL << VIRTIO_NET_F_MAC)			| \
	 (1ULL << VIRTIO_F_ANY_LAYOUT)			| \
	 (1ULL << VIRTIO_F_VERSION_1)			| \
	 (1ULL << VIRTIO_NET_F_STATUS)			| \
	 (1ULL << VIRTIO_F_ORDER_PLATFORM)		| \
	 (1ULL << VIRTIO_F_IOMMU_PLATFORM)		| \
	 (1ULL << VIRTIO_NET_F_MRG_RXBUF))

@@ -81,6 +82,9 @@ struct ifcvf_hw {
 	void __iomem *net_cfg;
 	struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2];
 	void __iomem * const *base;
+	char config_msix_name[256];
+	struct vdpa_callback config_cb;
+
 };

 struct ifcvf_adapter {
@@ -18,6 +18,16 @@
 #define DRIVER_AUTHOR   "Intel Corporation"
 #define IFCVF_DRIVER_NAME "ifcvf"

+static irqreturn_t ifcvf_config_changed(int irq, void *arg)
+{
+	struct ifcvf_hw *vf = arg;
+
+	if (vf->config_cb.callback)
+		return vf->config_cb.callback(vf->config_cb.private);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
 {
 	struct vring_info *vring = arg;
@@ -28,6 +38,68 @@ static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
 	return IRQ_HANDLED;
 }

+static void ifcvf_free_irq_vectors(void *data)
+{
+	pci_free_irq_vectors(data);
+}
+
+static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct ifcvf_hw *vf = &adapter->vf;
+	int i;
+
+	for (i = 0; i < queues; i++)
+		devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
+
+	ifcvf_free_irq_vectors(pdev);
+}
+
+static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct ifcvf_hw *vf = &adapter->vf;
+	int vector, i, ret, irq;
+
+	ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
+				    IFCVF_MAX_INTR, PCI_IRQ_MSIX);
+	if (ret < 0) {
+		IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
+		return ret;
+	}
+
+	snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
+		 pci_name(pdev));
+	vector = 0;
+	irq = pci_irq_vector(pdev, vector);
+	ret = devm_request_irq(&pdev->dev, irq,
+			       ifcvf_config_changed, 0,
+			       vf->config_msix_name, vf);
+
+	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
+			 pci_name(pdev), i);
+		vector = i + IFCVF_MSI_QUEUE_OFF;
+		irq = pci_irq_vector(pdev, vector);
+		ret = devm_request_irq(&pdev->dev, irq,
+				       ifcvf_intr_handler, 0,
+				       vf->vring[i].msix_name,
+				       &vf->vring[i]);
+		if (ret) {
+			IFCVF_ERR(pdev,
+				  "Failed to request irq for vq %d\n", i);
+			ifcvf_free_irq(adapter, i);
+
+			return ret;
+		}
+
+		vf->vring[i].irq = irq;
+	}
+
+	return 0;
+}
+
 static int ifcvf_start_datapath(void *private)
 {
 	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
@@ -118,17 +190,37 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
 {
 	struct ifcvf_adapter *adapter;
 	struct ifcvf_hw *vf;
+	u8 status_old;
+	int ret;

 	vf = vdpa_to_vf(vdpa_dev);
 	adapter = dev_get_drvdata(vdpa_dev->dev.parent);
+	status_old = ifcvf_get_status(vf);
+
+	if (status_old == status)
+		return;
+
+	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
+	    !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+		ifcvf_stop_datapath(adapter);
+		ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
+	}

 	if (status == 0) {
 		ifcvf_stop_datapath(adapter);
 		ifcvf_reset_vring(adapter);
 		return;
 	}

-	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
+		ret = ifcvf_request_irq(adapter);
+		if (ret) {
+			status = ifcvf_get_status(vf);
+			status |= VIRTIO_CONFIG_S_FAILED;
+			ifcvf_set_status(vf, status);
+			return;
+		}
+
 		if (ifcvf_start_datapath(adapter) < 0)
 			IFCVF_ERR(adapter->pdev,
 				  "Failed to set ifcvf vdpa status %u\n",
@@ -254,7 +346,10 @@ static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
 static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
 				     struct vdpa_callback *cb)
 {
-	/* We don't support config interrupt */
+	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+	vf->config_cb.callback = cb->callback;
+	vf->config_cb.private = cb->private;
 }

 /*
@@ -284,38 +379,6 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
 	.set_config_cb  = ifcvf_vdpa_set_config_cb,
 };

-static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
-{
-	struct pci_dev *pdev = adapter->pdev;
-	struct ifcvf_hw *vf = &adapter->vf;
-	int vector, i, ret, irq;
-
-	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
-		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
-			 pci_name(pdev), i);
-		vector = i + IFCVF_MSI_QUEUE_OFF;
-		irq = pci_irq_vector(pdev, vector);
-		ret = devm_request_irq(&pdev->dev, irq,
-				       ifcvf_intr_handler, 0,
-				       vf->vring[i].msix_name,
-				       &vf->vring[i]);
-		if (ret) {
-			IFCVF_ERR(pdev,
-				  "Failed to request irq for vq %d\n", i);
-			return ret;
-		}
-		vf->vring[i].irq = irq;
-	}
-
-	return 0;
-}
-
-static void ifcvf_free_irq_vectors(void *data)
-{
-	pci_free_irq_vectors(data);
-}
-
 static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct device *dev = &pdev->dev;
@@ -349,13 +412,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return ret;
 	}

-	ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
-				    IFCVF_MAX_INTR, PCI_IRQ_MSIX);
-	if (ret < 0) {
-		IFCVF_ERR(pdev, "Failed to alloc irq vectors\n");
-		return ret;
-	}
-
 	ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
 	if (ret) {
 		IFCVF_ERR(pdev,
@@ -379,12 +435,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	adapter->pdev = pdev;
 	adapter->vdpa.dma_dev = &pdev->dev;

-	ret = ifcvf_request_irq(adapter);
-	if (ret) {
-		IFCVF_ERR(pdev, "Failed to request MSI-X irq\n");
-		goto err;
-	}
-
 	ret = ifcvf_init_hw(vf, pdev);
 	if (ret) {
 		IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
@@ -101,7 +101,7 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)

 static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
 {
-	vq->ready = 0;
+	vq->ready = false;
 	vq->desc_addr = 0;
 	vq->driver_addr = 0;
 	vq->device_addr = 0;
@@ -131,9 +131,10 @@ static void vdpasim_work(struct work_struct *work)
 					       vdpasim, work);
 	struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
 	struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
-	size_t read, write, total_write;
-	int err;
+	ssize_t read, write;
+	size_t total_write;
 	int pkts = 0;
+	int err;

 	spin_lock(&vdpasim->lock);
@@ -13,15 +13,6 @@ config VHOST_RING
 	  This option is selected by any driver which needs to access
 	  the host side of a virtio ring.

-config VHOST_DPN
-	bool
-	depends on !ARM || AEABI
-	default y
-	help
-	  Anything selecting VHOST or VHOST_RING must depend on VHOST_DPN.
-	  This excludes the deprecated ARM ABI since that forces a 4 byte
-	  alignment on all structs - incompatible with virtio spec requirements.
-
 config VHOST
 	tristate
 	select VHOST_IOTLB
@@ -37,7 +28,7 @@ if VHOST_MENU

 config VHOST_NET
 	tristate "Host kernel accelerator for virtio net"
-	depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP) && VHOST_DPN
+	depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP)
 	select VHOST
 	---help---
 	  This kernel module can be loaded in host kernel to accelerate
@@ -49,7 +40,7 @@ config VHOST_NET

 config VHOST_SCSI
 	tristate "VHOST_SCSI TCM fabric driver"
-	depends on TARGET_CORE && EVENTFD && VHOST_DPN
+	depends on TARGET_CORE && EVENTFD
 	select VHOST
 	default n
 	---help---
@@ -58,7 +49,7 @@ config VHOST_SCSI

 config VHOST_VSOCK
 	tristate "vhost virtio-vsock driver"
-	depends on VSOCKETS && EVENTFD && VHOST_DPN
+	depends on VSOCKETS && EVENTFD
 	select VHOST
 	select VIRTIO_VSOCKETS_COMMON
 	default n
@@ -72,7 +63,7 @@ config VHOST_VSOCK

 config VHOST_VDPA
 	tristate "Vhost driver for vDPA-based backend"
-	depends on EVENTFD && VHOST_DPN
+	depends on EVENTFD
 	select VHOST
 	depends on VDPA
 	help
@@ -1327,7 +1327,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 	}
 	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
 		       UIO_MAXIOV + VHOST_NET_BATCH,
-		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT,
+		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
 		       NULL);

 	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
@@ -1628,7 +1628,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
 	}
 	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
-		       VHOST_SCSI_WEIGHT, 0, NULL);
+		       VHOST_SCSI_WEIGHT, 0, true, NULL);

 	vhost_scsi_init_inflight(vs, NULL);
@@ -120,7 +120,7 @@ static int vhost_test_open(struct inode *inode, struct file *f)
 	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
 	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
 	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
-		       VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, NULL);
+		       VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL);

 	f->private_data = n;
@@ -15,12 +15,14 @@
 #include <linux/module.h>
 #include <linux/cdev.h>
 #include <linux/device.h>
+#include <linux/mm.h>
 #include <linux/iommu.h>
 #include <linux/uuid.h>
 #include <linux/vdpa.h>
 #include <linux/nospec.h>
 #include <linux/vhost.h>
 #include <linux/virtio_net.h>
+#include <linux/kernel.h>

 #include "vhost.h"

@@ -70,6 +72,7 @@ struct vhost_vdpa {
 	int nvqs;
 	int virtio_id;
 	int minor;
+	struct eventfd_ctx *config_ctx;
 };

 static DEFINE_IDA(vhost_vdpa_ida);
@@ -101,6 +104,17 @@ static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
 	return IRQ_HANDLED;
 }

+static irqreturn_t vhost_vdpa_config_cb(void *private)
+{
+	struct vhost_vdpa *v = private;
+	struct eventfd_ctx *config_ctx = v->config_ctx;
+
+	if (config_ctx)
+		eventfd_signal(config_ctx, 1);
+
+	return IRQ_HANDLED;
+}
+
 static void vhost_vdpa_reset(struct vhost_vdpa *v)
 {
 	struct vdpa_device *vdpa = v->vdpa;
@@ -288,6 +302,36 @@ static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
 	return 0;
 }

+static void vhost_vdpa_config_put(struct vhost_vdpa *v)
+{
+	if (v->config_ctx)
+		eventfd_ctx_put(v->config_ctx);
+}
+
+static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
+{
+	struct vdpa_callback cb;
+	int fd;
+	struct eventfd_ctx *ctx;
+
+	cb.callback = vhost_vdpa_config_cb;
+	cb.private = v->vdpa;
+	if (copy_from_user(&fd, argp, sizeof(fd)))
+		return -EFAULT;
+
+	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
+	swap(ctx, v->config_ctx);
+
+	if (!IS_ERR_OR_NULL(ctx))
+		eventfd_ctx_put(ctx);
+
+	if (IS_ERR(v->config_ctx))
+		return PTR_ERR(v->config_ctx);
+
+	v->vdpa->config->set_config_cb(v->vdpa, &cb);
+
+	return 0;
+}
+
 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 				   void __user *argp)
 {
@@ -395,6 +439,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
 	case VHOST_SET_LOG_FD:
 		r = -ENOIOCTLCMD;
 		break;
+	case VHOST_VDPA_SET_CONFIG_CALL:
+		r = vhost_vdpa_set_config_call(v, argp);
+		break;
 	default:
 		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
 		if (r == -ENOIOCTLCMD)
@@ -694,7 +741,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
 		vqs[i] = &v->vqs[i];
 		vqs[i]->handle_kick = handle_vq_kick;
 	}
-	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0,
+	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
 		       vhost_vdpa_process_iotlb_msg);

 	dev->iotlb = vhost_iotlb_alloc(0, 0);
@@ -729,6 +776,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
 	vhost_dev_stop(&v->vdev);
 	vhost_vdpa_iotlb_free(v);
 	vhost_vdpa_free_domain(v);
+	vhost_vdpa_config_put(v);
 	vhost_dev_cleanup(&v->vdev);
 	kfree(v->vdev.vqs);
 	mutex_unlock(&d->mutex);
@@ -739,12 +787,74 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
 	return 0;
 }

+#ifdef CONFIG_MMU
+static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
+{
+	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
+	struct vdpa_device *vdpa = v->vdpa;
+	const struct vdpa_config_ops *ops = vdpa->config;
+	struct vdpa_notification_area notify;
+	struct vm_area_struct *vma = vmf->vma;
+	u16 index = vma->vm_pgoff;
+
+	notify = ops->get_vq_notification(vdpa, index);
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
+			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
+			    vma->vm_page_prot))
+		return VM_FAULT_SIGBUS;
+
+	return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct vhost_vdpa_vm_ops = {
+	.fault = vhost_vdpa_fault,
+};
+
+static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct vhost_vdpa *v = vma->vm_file->private_data;
+	struct vdpa_device *vdpa = v->vdpa;
+	const struct vdpa_config_ops *ops = vdpa->config;
+	struct vdpa_notification_area notify;
+	int index = vma->vm_pgoff;
+
+	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+		return -EINVAL;
+	if ((vma->vm_flags & VM_SHARED) == 0)
+		return -EINVAL;
+	if (vma->vm_flags & VM_READ)
+		return -EINVAL;
+	if (index > 65535)
+		return -EINVAL;
+	if (!ops->get_vq_notification)
+		return -ENOTSUPP;
+
+	/* To be safe and easily modelled by userspace, We only
+	 * support the doorbell which sits on the page boundary and
+	 * does not share the page with other registers.
+	 */
+	notify = ops->get_vq_notification(vdpa, index);
+	if (notify.addr & (PAGE_SIZE - 1))
+		return -EINVAL;
+	if (vma->vm_end - vma->vm_start != notify.size)
+		return -ENOTSUPP;
+
+	vma->vm_ops = &vhost_vdpa_vm_ops;
+	return 0;
+}
+#endif /* CONFIG_MMU */
+
 static const struct file_operations vhost_vdpa_fops = {
 	.owner		= THIS_MODULE,
 	.open		= vhost_vdpa_open,
 	.release	= vhost_vdpa_release,
 	.write_iter	= vhost_vdpa_chr_write_iter,
 	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
+#ifdef CONFIG_MMU
+	.mmap		= vhost_vdpa_mmap,
+#endif /* CONFIG_MMU */
 	.compat_ioctl	= compat_ptr_ioctl,
 };
@@ -166,11 +166,16 @@ static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
 			     void *key)
 {
 	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
+	struct vhost_work *work = &poll->work;

 	if (!(key_to_poll(key) & poll->mask))
 		return 0;

-	vhost_poll_queue(poll);
+	if (!poll->dev->use_worker)
+		work->fn(work);
+	else
+		vhost_poll_queue(poll);

 	return 0;
 }

@@ -454,6 +459,7 @@ static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
 void vhost_dev_init(struct vhost_dev *dev,
 		    struct vhost_virtqueue **vqs, int nvqs,
 		    int iov_limit, int weight, int byte_weight,
+		    bool use_worker,
 		    int (*msg_handler)(struct vhost_dev *dev,
 				       struct vhost_iotlb_msg *msg))
 {
@@ -471,6 +477,7 @@ void vhost_dev_init(struct vhost_dev *dev,
 	dev->iov_limit = iov_limit;
 	dev->weight = weight;
 	dev->byte_weight = byte_weight;
+	dev->use_worker = use_worker;
 	dev->msg_handler = msg_handler;
 	init_llist_head(&dev->work_list);
 	init_waitqueue_head(&dev->wait);
@@ -534,6 +541,36 @@ bool vhost_dev_has_owner(struct vhost_dev *dev)
 }
 EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

+static void vhost_attach_mm(struct vhost_dev *dev)
+{
+	/* No owner, become one */
+	if (dev->use_worker) {
+		dev->mm = get_task_mm(current);
+	} else {
+		/* vDPA device does not use worker thead, so there's
+		 * no need to hold the address space for mm. This help
+		 * to avoid deadlock in the case of mmap() which may
+		 * held the refcnt of the file and depends on release
+		 * method to remove vma.
+		 */
+		dev->mm = current->mm;
+		mmgrab(dev->mm);
+	}
+}
+
+static void vhost_detach_mm(struct vhost_dev *dev)
+{
+	if (!dev->mm)
+		return;
+
+	if (dev->use_worker)
+		mmput(dev->mm);
+	else
+		mmdrop(dev->mm);
+
+	dev->mm = NULL;
+}
+
 /* Caller should have device mutex */
 long vhost_dev_set_owner(struct vhost_dev *dev)
 {
@@ -546,34 +583,37 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
 		goto err_mm;
 	}

-	/* No owner, become one */
-	dev->mm = get_task_mm(current);
+	vhost_attach_mm(dev);
+
 	dev->kcov_handle = kcov_common_handle();
-	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
-	if (IS_ERR(worker)) {
-		err = PTR_ERR(worker);
-		goto err_worker;
+	if (dev->use_worker) {
+		worker = kthread_create(vhost_worker, dev,
+					"vhost-%d", current->pid);
+		if (IS_ERR(worker)) {
+			err = PTR_ERR(worker);
+			goto err_worker;
+		}
+
+		dev->worker = worker;
+		wake_up_process(worker); /* avoid contributing to loadavg */
+
+		err = vhost_attach_cgroups(dev);
+		if (err)
+			goto err_cgroup;
 	}

-	dev->worker = worker;
-	wake_up_process(worker); /* avoid contributing to loadavg */
-
-	err = vhost_attach_cgroups(dev);
-	if (err)
-		goto err_cgroup;
-
 	err = vhost_dev_alloc_iovecs(dev);
 	if (err)
 		goto err_cgroup;

 	return 0;
 err_cgroup:
-	kthread_stop(worker);
-	dev->worker = NULL;
+	if (dev->worker) {
+		kthread_stop(dev->worker);
+		dev->worker = NULL;
+	}
 err_worker:
-	if (dev->mm)
-		mmput(dev->mm);
-	dev->mm = NULL;
+	vhost_detach_mm(dev);
+	dev->kcov_handle = 0;
 err_mm:
 	return err;
@@ -670,9 +710,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 		dev->worker = NULL;
 		dev->kcov_handle = 0;
 	}
-	if (dev->mm)
-		mmput(dev->mm);
-	dev->mm = NULL;
+	vhost_detach_mm(dev);
 }
 EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

@@ -882,7 +920,7 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,

 #define vhost_put_user(vq, x, ptr)		\
 ({ \
-	int ret = -EFAULT; \
+	int ret; \
 	if (!vq->iotlb) { \
 		ret = __put_user(x, ptr); \
 	} else { \
@@ -1244,9 +1282,9 @@ static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
 }

 static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
-			 struct vring_desc __user *desc,
-			 struct vring_avail __user *avail,
-			 struct vring_used __user *used)
+			 vring_desc_t __user *desc,
+			 vring_avail_t __user *avail,
+			 vring_used_t __user *used)

 {
 	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
@@ -1574,7 +1612,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
 			r = -EFAULT;
 			break;
 		}
-		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
+		eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd);
 		if (IS_ERR(eventfp)) {
 			r = PTR_ERR(eventfp);
 			break;
@@ -1590,7 +1628,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
 			r = -EFAULT;
 			break;
 		}
-		ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
+		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
 		if (IS_ERR(ctx)) {
 			r = PTR_ERR(ctx);
 			break;
@@ -1602,7 +1640,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
 			r = -EFAULT;
 			break;
 		}
-		ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
+		ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd);
 		if (IS_ERR(ctx)) {
 			r = PTR_ERR(ctx);
 			break;
@@ -1727,7 +1765,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
 		r = get_user(fd, (int __user *)argp);
 		if (r < 0)
 			break;
-		ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
+		ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
 		if (IS_ERR(ctx)) {
 			r = PTR_ERR(ctx);
 			break;
@@ -2300,7 +2338,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
 			    struct vring_used_elem *heads,
 			    unsigned count)
 {
-	struct vring_used_elem __user *used;
+	vring_used_elem_t __user *used;
 	u16 old, new;
 	int start;
@@ -67,9 +67,9 @@ struct vhost_virtqueue {
 	/* The actual ring of buffers. */
 	struct mutex mutex;
 	unsigned int num;
-	struct vring_desc __user *desc;
-	struct vring_avail __user *avail;
-	struct vring_used __user *used;
+	vring_desc_t __user *desc;
+	vring_avail_t __user *avail;
+	vring_used_t __user *used;
 	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
 	struct file *kick;
 	struct eventfd_ctx *call_ctx;
@@ -154,6 +154,7 @@ struct vhost_dev {
 	int weight;
 	int byte_weight;
 	u64 kcov_handle;
+	bool use_worker;
 	int (*msg_handler)(struct vhost_dev *dev,
 			   struct vhost_iotlb_msg *msg);
 };
@@ -161,6 +162,7 @@ struct vhost_dev {
 bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
 void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
 		    int nvqs, int iov_limit, int weight, int byte_weight,
+		    bool use_worker,
 		    int (*msg_handler)(struct vhost_dev *dev,
 				       struct vhost_iotlb_msg *msg));
 long vhost_dev_set_owner(struct vhost_dev *dev);
@@ -620,9 +620,9 @@ static inline int xfer_to_user(const struct vringh *vrh,
  */
 int vringh_init_user(struct vringh *vrh, u64 features,
 		     unsigned int num, bool weak_barriers,
-		     struct vring_desc __user *desc,
-		     struct vring_avail __user *avail,
-		     struct vring_used __user *used)
+		     vring_desc_t __user *desc,
+		     vring_avail_t __user *avail,
+		     vring_used_t __user *used)
 {
 	/* Sane power of 2 please! */
 	if (!num || num > 0xffff || (num & (num - 1))) {
@@ -632,7 +632,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)

 	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
 		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
-		       VHOST_VSOCK_WEIGHT, NULL);
+		       VHOST_VSOCK_WEIGHT, true, NULL);

 	file->private_data = vsock;
 	spin_lock_init(&vsock->send_pkt_list_lock);
@@ -78,6 +78,23 @@ config VIRTIO_BALLOON

 	  If unsure, say M.

+config VIRTIO_MEM
+	tristate "Virtio mem driver"
+	default m
+	depends on X86_64
+	depends on VIRTIO
+	depends on MEMORY_HOTPLUG_SPARSE
+	depends on MEMORY_HOTREMOVE
+	select CONTIG_ALLOC
+	help
+	  This driver provides access to virtio-mem paravirtualized memory
+	  devices, allowing to hotplug and hotunplug memory.
+
+	  This driver was only tested under x86-64, but should theoretically
+	  work on all architectures that support memory hotplug and hotremove.
+
+	  If unsure, say M.
+
 config VIRTIO_INPUT
 	tristate "Virtio input driver"
 	depends on VIRTIO
@@ -7,3 +7,4 @@ virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
 obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
 obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
 obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
+obj-$(CONFIG_VIRTIO_MEM) += virtio_mem.o
@@ -1107,11 +1107,18 @@ static int virtballoon_restore(struct virtio_device *vdev)

 static int virtballoon_validate(struct virtio_device *vdev)
 {
-	/* Tell the host whether we care about poisoned pages. */
+	/*
+	 * Inform the hypervisor that our pages are poisoned or
+	 * initialized. If we cannot do that then we should disable
+	 * page reporting as it could potentially change the contents
+	 * of our free pages.
+	 */
 	if (!want_init_on_free() &&
 	    (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY) ||
 	     !page_poisoning_enabled()))
 		__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
+	else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON))
+		__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING);

 	__virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
 	return 0;
drivers/virtio/virtio_mem.c (new file, 1965 lines)
File diff suppressed because it is too large. Load Diff.
@@ -466,10 +466,8 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 	int irq = platform_get_irq(vm_dev->pdev, 0);
 	int i, err, queue_idx = 0;

-	if (irq < 0) {
-		dev_err(&vdev->dev, "Cannot get IRQ resource\n");
+	if (irq < 0)
 		return irq;
-	}

 	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
 			  dev_name(&vdev->dev), vm_dev);
@@ -16,6 +16,7 @@

 #include <linux/delay.h>
 #define VIRTIO_PCI_NO_LEGACY
+#define VIRTIO_RING_NO_LEGACY
 #include "virtio_pci_common.h"

 /*
@@ -318,6 +318,7 @@ extern void try_offline_node(int nid);
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 extern int remove_memory(int nid, u64 start, u64 size);
 extern void __remove_memory(int nid, u64 start, u64 size);
+extern int offline_and_remove_memory(int nid, u64 start, u64 size);

 #else
 static inline void try_offline_node(int nid) {}
@@ -777,6 +777,16 @@ PAGE_TYPE_OPS(Buddy, buddy)
  * not onlined when onlining the section).
  * The content of these pages is effectively stale. Such pages should not
  * be touched (read/write/dump/save) except by their owner.
+ *
+ * If a driver wants to allow to offline unmovable PageOffline() pages without
+ * putting them back to the buddy, it can do so via the memory notifier by
+ * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
+ * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
+ * pages (now with a reference count of zero) are treated like free pages,
+ * allowing the containing memory block to get offlined. A driver that
+ * relies on this feature is aware that re-onlining the memory block will
+ * require to re-set the pages PageOffline() and not giving them to the
+ * buddy via online_page_callback_t.
  */
 PAGE_TYPE_OPS(Offline, offline)
@@ -17,6 +17,16 @@ struct vdpa_callback {
 	void *private;
 };

+/**
+ * vDPA notification area
+ * @addr: base address of the notification area
+ * @size: size of the notification area
+ */
+struct vdpa_notification_area {
+	resource_size_t addr;
+	resource_size_t size;
+};
+
 /**
  * vDPA device - representation of a vDPA device
  * @dev: underlying device
@@ -73,6 +83,10 @@ struct vdpa_device {
  *				@vdev: vdpa device
  *				@idx: virtqueue index
  *				Returns virtqueue state (last_avail_idx)
+ * @get_vq_notification:	Get the notification area for a virtqueue
+ *				@vdev: vdpa device
+ *				@idx: virtqueue index
+ *				Returns the notification area
  * @get_vq_align:		Get the virtqueue align requirement
  *				for the device
  *				@vdev: vdpa device
@@ -162,6 +176,8 @@ struct vdpa_config_ops {
 	bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx);
 	int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state);
 	u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx);
+	struct vdpa_notification_area
+	(*get_vq_notification)(struct vdpa_device *vdev, u16 idx);

 	/* Device ops */
 	u32 (*get_vq_align)(struct vdpa_device *vdev);
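A driver backing the new get_vq_notification op simply reports where a queue's doorbell lives in physical address space. A hedged sketch under stated assumptions (struct my_vdpa and its fields are invented for illustration; a real driver would derive them from its PCI notification capability):

struct my_vdpa {
        struct vdpa_device vdpa;
        resource_size_t notify_base;            /* doorbell region base (phys) */
        resource_size_t notify_off_multiplier;  /* stride between doorbells */
};

static struct vdpa_notification_area
my_get_vq_notification(struct vdpa_device *vdev, u16 idx)
{
        struct my_vdpa *m = container_of(vdev, struct my_vdpa, vdpa);
        struct vdpa_notification_area area;

        /* One exclusive, page-aligned doorbell page per virtqueue, as the
         * vhost-vdpa mmap path requires. */
        area.addr = m->notify_base + idx * m->notify_off_multiplier;
        area.size = PAGE_SIZE;

        return area;
}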
@@ -105,9 +105,9 @@ struct vringh_kiov {
 /* Helpers for userspace vrings. */
 int vringh_init_user(struct vringh *vrh, u64 features,
 		     unsigned int num, bool weak_barriers,
-		     struct vring_desc __user *desc,
-		     struct vring_avail __user *avail,
-		     struct vring_used __user *used);
+		     vring_desc_t __user *desc,
+		     vring_avail_t __user *avail,
+		     vring_used_t __user *used);

 static inline void vringh_iov_init(struct vringh_iov *iov,
 				   struct iovec *iovec, unsigned num)
@@ -15,6 +15,8 @@
 #include <linux/types.h>
 #include <linux/ioctl.h>

+#define VHOST_FILE_UNBIND -1
+
 /* ioctls */

 #define VHOST_VIRTIO 0xAF
@@ -140,4 +142,6 @@
 /* Get the max ring size. */
 #define VHOST_VDPA_GET_VRING_NUM	_IOR(VHOST_VIRTIO, 0x76, __u16)

+/* Set event fd for config interrupt */
+#define VHOST_VDPA_SET_CONFIG_CALL	_IOW(VHOST_VIRTIO, 0x77, int)
 #endif
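Usage of the two new uapi symbols from userspace, as a sketch grounded in the definitions above (the device path is an assumption): bind an eventfd to receive config-change interrupts, then detach it again with VHOST_FILE_UNBIND.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

int main(void)
{
        int fd = open("/dev/vhost-vdpa-0", O_RDWR);     /* path assumed */
        int efd = eventfd(0, EFD_CLOEXEC);
        uint64_t cnt;

        if (fd < 0 || efd < 0) { perror("setup"); return 1; }

        /* Bind: the kernel signals this eventfd on config changes. */
        if (ioctl(fd, VHOST_VDPA_SET_CONFIG_CALL, &efd))
                perror("VHOST_VDPA_SET_CONFIG_CALL");

        if (read(efd, &cnt, sizeof(cnt)) == sizeof(cnt))        /* blocks */
                printf("config changed %llu time(s)\n",
                       (unsigned long long)cnt);

        /* Unbind with the new named constant instead of a bare -1. */
        int unbind = VHOST_FILE_UNBIND;
        ioctl(fd, VHOST_VDPA_SET_CONFIG_CALL, &unbind);

        close(efd);
        close(fd);
        return 0;
}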
@@ -44,6 +44,7 @@
 #define VIRTIO_ID_VSOCK        19 /* virtio vsock transport */
 #define VIRTIO_ID_CRYPTO       20 /* virtio crypto */
 #define VIRTIO_ID_IOMMU        23 /* virtio IOMMU */
+#define VIRTIO_ID_MEM          24 /* virtio mem */
 #define VIRTIO_ID_FS           26 /* virtio filesystem */
 #define VIRTIO_ID_PMEM         27 /* virtio pmem */
 #define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */
include/uapi/linux/virtio_mem.h (new file, 211 lines)
@@ -0,0 +1,211 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/*
 * Virtio Mem Device
 *
 * Copyright Red Hat, Inc. 2020
 *
 * Authors:
 *	David Hildenbrand <david@redhat.com>
 *
 * This header is BSD licensed so anyone can use the definitions
 * to implement compatible drivers/servers:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _LINUX_VIRTIO_MEM_H
#define _LINUX_VIRTIO_MEM_H

#include <linux/types.h>
#include <linux/virtio_types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>

/*
 * Each virtio-mem device manages a dedicated region in physical address
 * space. Each device can belong to a single NUMA node, multiple devices
 * for a single NUMA node are possible. A virtio-mem device is like a
 * "resizable DIMM" consisting of small memory blocks that can be plugged
 * or unplugged. The device driver is responsible for (un)plugging memory
 * blocks on demand.
 *
 * Virtio-mem devices can only operate on their assigned memory region in
 * order to (un)plug memory. A device cannot (un)plug memory belonging to
 * other devices.
 *
 * The "region_size" corresponds to the maximum amount of memory that can
 * be provided by a device. The "size" corresponds to the amount of memory
 * that is currently plugged. "requested_size" corresponds to a request
 * from the device to the device driver to (un)plug blocks. The
 * device driver should try to (un)plug blocks in order to reach the
 * "requested_size". It is impossible to plug more memory than requested.
 *
 * The "usable_region_size" represents the memory region that can actually
 * be used to (un)plug memory. It is always at least as big as the
 * "requested_size" and will grow dynamically. It will only shrink when
 * explicitly triggered (VIRTIO_MEM_REQ_UNPLUG).
 *
 * There are no guarantees what will happen if unplugged memory is
 * read/written. Such memory should, in general, not be touched. E.g.,
 * even writing might succeed, but the values will simply be discarded at
 * random points in time.
 *
 * It can happen that the device cannot process a request, because it is
 * busy. The device driver has to retry later.
 *
 * Usually, during system resets all memory will get unplugged, so the
 * device driver can start with a clean state. However, in specific
 * scenarios (if the device is busy) it can happen that the device still
 * has memory plugged. The device driver can request to unplug all memory
 * (VIRTIO_MEM_REQ_UNPLUG) - which might take a while to succeed if the
 * device is busy.
 */

/* --- virtio-mem: feature bits --- */

/* node_id is an ACPI PXM and is valid */
#define VIRTIO_MEM_F_ACPI_PXM		0


/* --- virtio-mem: guest -> host requests --- */

/* request to plug memory blocks */
#define VIRTIO_MEM_REQ_PLUG			0
/* request to unplug memory blocks */
#define VIRTIO_MEM_REQ_UNPLUG			1
/* request to unplug all blocks and shrink the usable size */
#define VIRTIO_MEM_REQ_UNPLUG_ALL		2
/* request information about the plugged state of memory blocks */
#define VIRTIO_MEM_REQ_STATE			3

struct virtio_mem_req_plug {
	__virtio64 addr;
	__virtio16 nb_blocks;
	__virtio16 padding[3];
};

struct virtio_mem_req_unplug {
	__virtio64 addr;
	__virtio16 nb_blocks;
	__virtio16 padding[3];
};

struct virtio_mem_req_state {
	__virtio64 addr;
	__virtio16 nb_blocks;
	__virtio16 padding[3];
};

struct virtio_mem_req {
	__virtio16 type;
	__virtio16 padding[3];

	union {
		struct virtio_mem_req_plug plug;
		struct virtio_mem_req_unplug unplug;
		struct virtio_mem_req_state state;
	} u;
};


/* --- virtio-mem: host -> guest response --- */

/*
 * Request processed successfully, applicable for
 * - VIRTIO_MEM_REQ_PLUG
 * - VIRTIO_MEM_REQ_UNPLUG
 * - VIRTIO_MEM_REQ_UNPLUG_ALL
 * - VIRTIO_MEM_REQ_STATE
 */
#define VIRTIO_MEM_RESP_ACK			0
/*
 * Request denied - e.g. trying to plug more than requested, applicable for
 * - VIRTIO_MEM_REQ_PLUG
 */
#define VIRTIO_MEM_RESP_NACK			1
/*
 * Request cannot be processed right now, try again later, applicable for
 * - VIRTIO_MEM_REQ_PLUG
 * - VIRTIO_MEM_REQ_UNPLUG
 * - VIRTIO_MEM_REQ_UNPLUG_ALL
 */
#define VIRTIO_MEM_RESP_BUSY			2
/*
 * Error in request (e.g. addresses/alignment), applicable for
 * - VIRTIO_MEM_REQ_PLUG
 * - VIRTIO_MEM_REQ_UNPLUG
 * - VIRTIO_MEM_REQ_STATE
 */
#define VIRTIO_MEM_RESP_ERROR			3


/* State of memory blocks is "plugged" */
#define VIRTIO_MEM_STATE_PLUGGED		0
/* State of memory blocks is "unplugged" */
#define VIRTIO_MEM_STATE_UNPLUGGED		1
/* State of memory blocks is "mixed" */
#define VIRTIO_MEM_STATE_MIXED			2

struct virtio_mem_resp_state {
	__virtio16 state;
};

struct virtio_mem_resp {
	__virtio16 type;
	__virtio16 padding[3];

	union {
		struct virtio_mem_resp_state state;
	} u;
};

/* --- virtio-mem: configuration --- */

struct virtio_mem_config {
	/* Block size and alignment. Cannot change. */
	__u64 block_size;
	/* Valid with VIRTIO_MEM_F_ACPI_PXM. Cannot change. */
	__u16 node_id;
	__u8 padding[6];
	/* Start address of the memory region. Cannot change. */
	__u64 addr;
	/* Region size (maximum). Cannot change. */
	__u64 region_size;
	/*
	 * Currently usable region size. Can grow up to region_size. Can
	 * shrink due to VIRTIO_MEM_REQ_UNPLUG_ALL (in which case no config
	 * update will be sent).
	 */
	__u64 usable_region_size;
	/*
	 * Currently used size. Changes due to plug/unplug requests, but no
	 * config updates will be sent.
	 */
	__u64 plugged_size;
	/* Requested size. New plug requests cannot exceed it. Can change. */
	__u64 requested_size;
};

#endif /* _LINUX_VIRTIO_MEM_H */
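To make the request layout concrete, a small sketch that fills a plug request from the structures above (assuming a modern device where __virtio16/__virtio64 are little-endian; build_plug_req is a hypothetical helper). The explicit memset matters because, per the "Don't rely on implicit compiler padding" fix, the padding fields are now part of the wire format and must be zeroed:

#include <stdint.h>
#include <string.h>
#include <linux/virtio_mem.h>

static void build_plug_req(struct virtio_mem_req *req,
                           uint64_t addr, uint16_t nb_blocks)
{
        /* Zero everything, including the explicit padding fields. */
        memset(req, 0, sizeof(*req));
        req->type = VIRTIO_MEM_REQ_PLUG;        /* __virtio16, LE assumed */
        req->u.plug.addr = addr;                /* must be block_size aligned */
        req->u.plug.nb_blocks = nb_blocks;      /* cannot exceed requested_size */
}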
@@ -86,6 +86,13 @@
  * at the end of the used ring. Guest should ignore the used->flags field. */
 #define VIRTIO_RING_F_EVENT_IDX		29

+/* Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+
 /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
 struct vring_desc {
 	/* Address (guest-physical). */
@@ -112,28 +119,47 @@ struct vring_used_elem {
 	__virtio32 len;
 };

+typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+	vring_used_elem_t;
+
 struct vring_used {
 	__virtio16 flags;
 	__virtio16 idx;
-	struct vring_used_elem ring[];
+	vring_used_elem_t ring[];
 };

+/*
+ * The ring element addresses are passed between components with different
+ * alignments assumptions. Thus, we might need to decrease the compiler-selected
+ * alignment, and so must use a typedef to make sure the aligned attribute
+ * actually takes hold:
+ *
+ * https://gcc.gnu.org/onlinedocs//gcc/Common-Type-Attributes.html#Common-Type-Attributes
+ *
+ * When used on a struct, or struct member, the aligned attribute can only
+ * increase the alignment; in order to decrease it, the packed attribute must
+ * be specified as well. When used as part of a typedef, the aligned attribute
+ * can both increase and decrease alignment, and specifying the packed
+ * attribute generates a warning.
+ */
+typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE)))
+	vring_desc_t;
+typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE)))
+	vring_avail_t;
+typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
+	vring_used_t;
+
 struct vring {
 	unsigned int num;

-	struct vring_desc *desc;
+	vring_desc_t *desc;

-	struct vring_avail *avail;
+	vring_avail_t *avail;

-	struct vring_used *used;
+	vring_used_t *used;
 };

-/* Alignment requirements for vring elements.
- * When using pre-virtio 1.0 layout, these fall out naturally.
- */
-#define VRING_AVAIL_ALIGN_SIZE 2
-#define VRING_USED_ALIGN_SIZE 4
-#define VRING_DESC_ALIGN_SIZE 16
-
 #ifndef VIRTIO_RING_NO_LEGACY

 /* The standard layout for the ring is a continuous chunk of memory which looks
 * like this. We assume num is a power of 2.
@@ -181,6 +207,8 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
 		+ sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
 }

+#endif /* VIRTIO_RING_NO_LEGACY */
+
 /* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
 /* Assuming a given event_idx value from the other side, if
  * we have just incremented index from old to new_idx,
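The typedef subtlety described in the comment above can be checked at compile time. A small sketch of userspace C11 assertions (the expected values follow directly from the definitions above; assumes a conventional ABI where __virtio16/32/64 have natural alignment):

#include <stdalign.h>
#include <linux/virtio_ring.h>

/* aligned() on a typedef may raise OR lower alignment; on a struct it
 * could only raise it without packed. */
_Static_assert(alignof(vring_desc_t) == VRING_DESC_ALIGN_SIZE,
               "descriptor alignment pinned to 16 bytes");
_Static_assert(alignof(vring_avail_t) == VRING_AVAIL_ALIGN_SIZE,
               "available ring needs only 2-byte alignment");
_Static_assert(alignof(vring_used_t) == VRING_USED_ALIGN_SIZE,
               "used ring needs only 4-byte alignment");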
@@ -1201,11 +1201,17 @@ struct zone *test_pages_in_a_zone(unsigned long start_pfn,

 /*
  * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
- * non-lru movable pages and hugepages). We scan pfn because it's much
- * easier than scanning over linked list. This function returns the pfn
- * of the first found movable page if it's found, otherwise 0.
+ * non-lru movable pages and hugepages). Will skip over most unmovable
+ * pages (esp., pages that can be skipped when offlining), but bail out on
+ * definitely unmovable pages.
+ *
+ * Returns:
+ *	0 in case a movable page is found and movable_pfn was updated.
+ *	-ENOENT in case no movable page was found.
+ *	-EBUSY in case a definitely unmovable page was found.
  */
-static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
+static int scan_movable_pages(unsigned long start, unsigned long end,
+			      unsigned long *movable_pfn)
 {
 	unsigned long pfn;

@@ -1217,18 +1223,30 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
 			continue;
 		page = pfn_to_page(pfn);
 		if (PageLRU(page))
-			return pfn;
+			goto found;
 		if (__PageMovable(page))
-			return pfn;
+			goto found;
+
+		/*
+		 * PageOffline() pages that are not marked __PageMovable() and
+		 * have a reference count > 0 (after MEM_GOING_OFFLINE) are
+		 * definitely unmovable. If their reference count would be 0,
+		 * they could at least be skipped when offlining memory.
+		 */
+		if (PageOffline(page) && page_count(page))
+			return -EBUSY;

 		if (!PageHuge(page))
 			continue;
 		head = compound_head(page);
 		if (page_huge_active(head))
-			return pfn;
+			goto found;
 		skip = compound_nr(head) - (page - head);
 		pfn += skip - 1;
 	}
+	return -ENOENT;
+found:
+	*movable_pfn = pfn;
+	return 0;
 }

@@ -1491,7 +1509,8 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	}

 	do {
-		for (pfn = start_pfn; pfn;) {
+		pfn = start_pfn;
+		do {
 			if (signal_pending(current)) {
 				ret = -EINTR;
 				reason = "signal backoff";
@@ -1501,14 +1520,19 @@ static int __ref __offline_pages(unsigned long start_pfn,
 			cond_resched();
 			lru_add_drain_all();

-			pfn = scan_movable_pages(pfn, end_pfn);
-			if (pfn) {
+			ret = scan_movable_pages(pfn, end_pfn, &pfn);
+			if (!ret) {
 				/*
 				 * TODO: fatal migration failures should bail
 				 * out
 				 */
 				do_migrate_range(pfn, end_pfn);
 			}
+		} while (!ret);
+
+		if (ret != -ENOENT) {
+			reason = "unmovable page";
+			goto failed_removal_isolated;
+		}

 		/*
@@ -1774,4 +1798,41 @@ int remove_memory(int nid, u64 start, u64 size)
 	return rc;
 }
 EXPORT_SYMBOL_GPL(remove_memory);
+
+/*
+ * Try to offline and remove a memory block. Might take a long time to
+ * finish in case memory is still in use. Primarily useful for memory devices
+ * that logically unplugged all memory (so it's no longer in use) and want to
+ * offline + remove the memory block.
+ */
+int offline_and_remove_memory(int nid, u64 start, u64 size)
+{
+	struct memory_block *mem;
+	int rc = -EINVAL;
+
+	if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
+	    size != memory_block_size_bytes())
+		return rc;
+
+	lock_device_hotplug();
+	mem = find_memory_block(__pfn_to_section(PFN_DOWN(start)));
+	if (mem)
+		rc = device_offline(&mem->dev);
+	/* Ignore if the device is already offline. */
+	if (rc > 0)
+		rc = 0;
+
+	/*
+	 * In case we succeeded to offline the memory block, remove it.
+	 * This cannot fail as it cannot get onlined in the meantime.
+	 */
+	if (!rc) {
+		rc = try_remove_memory(nid, start, size);
+		WARN_ON_ONCE(rc);
+	}
+	unlock_device_hotplug();
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(offline_and_remove_memory);
 #endif /* CONFIG_MEMORY_HOTREMOVE */
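A hedged sketch of the intended calling pattern for the new helper — virtio-mem is the in-tree user; demo_unplug_block() and its retry policy are illustrative assumptions, not code from this merge:

#include <linux/memory.h>
#include <linux/memory_hotplug.h>

static int demo_unplug_block(int nid, u64 addr)
{
        const u64 size = memory_block_size_bytes();
        int rc;

        /* Works on exactly one aligned memory block, per the helper's
         * contract; failures mean the block is still in use, so the
         * caller simply retries later. */
        rc = offline_and_remove_memory(nid, addr, size);
        if (rc)
                pr_debug("block at 0x%llx still in use, retry later: %d\n",
                         addr, rc);
        return rc;
}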
@@ -8285,6 +8285,19 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
 			continue;

+		/*
+		 * We treat all PageOffline() pages as movable when offlining
+		 * to give drivers a chance to decrement their reference count
+		 * in MEM_GOING_OFFLINE in order to indicate that these pages
+		 * can be offlined as there are no direct references anymore.
+		 * For actually unmovable PageOffline() where the driver does
+		 * not support this, we will fail later when trying to actually
+		 * move these pages that still have a reference count > 0.
+		 * (false negatives in this function only)
+		 */
+		if ((flags & MEMORY_OFFLINE) && PageOffline(page))
+			continue;
+
 		if (__PageMovable(page) || PageLRU(page))
 			continue;

@@ -8516,6 +8529,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 				pfn_max_align_up(end), migratetype);
 	return ret;
 }
+EXPORT_SYMBOL(alloc_contig_range);

 static int __alloc_contig_pages(unsigned long start_pfn,
 				unsigned long nr_pages, gfp_t gfp_mask)
@@ -8631,6 +8645,7 @@ void free_contig_range(unsigned long pfn, unsigned int nr_pages)
 	}
 	WARN(count != 0, "%d pages are still in use!\n", count);
 }
+EXPORT_SYMBOL(free_contig_range);

 /*
  * The zone indicated has a new number of managed_pages; batch sizes and percpu
@@ -8703,6 +8718,17 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 			offlined_pages++;
 			continue;
 		}
+		/*
+		 * At this point all remaining PageOffline() pages have a
+		 * reference count of 0 and can simply be skipped.
+		 */
+		if (PageOffline(page)) {
+			BUG_ON(page_count(page));
+			BUG_ON(PageBuddy(page));
+			pfn++;
+			offlined_pages++;
+			continue;
+		}

 		BUG_ON(page_count(page));
 		BUG_ON(!PageBuddy(page));
@@ -151,6 +151,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
  *					 a bit mask)
  *			MEMORY_OFFLINE - isolate to offline (!allocate) memory
  *					 e.g., skip over PageHWPoison() pages
+ *					 and PageOffline() pages.
  *			REPORT_FAILURE - report details about the failure to
  *					 isolate the range
  *
@@ -259,6 +260,14 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
 		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
 			/* A HWPoisoned page cannot be also PageBuddy */
 			pfn++;
+		else if ((flags & MEMORY_OFFLINE) && PageOffline(page) &&
+			 !page_count(page))
+			/*
+			 * The responsible driver agreed to skip PageOffline()
+			 * pages when offlining memory by dropping its
+			 * reference in MEM_GOING_OFFLINE.
+			 */
+			pfn++;
 		else
 			break;
 	}