Merge branches 'v5.15/vfio/spdx-license-cleanups', 'v5.15/vfio/dma-valid-waited-v3', 'v5.15/vfio/vfio-pci-core-v5' and 'v5.15/vfio/vfio-ap' into v5.15/vfio/next
commit ea870730d8
@@ -103,6 +103,7 @@ need pass only as many optional fields as necessary:
- subvendor and subdevice fields default to PCI_ANY_ID (FFFFFFFF)
- class and classmask fields default to 0
- driver_data defaults to 0UL.
- override_only field defaults to 0.

Note that driver_data must match the value used by any of the pci_device_id
entries defined in the driver. This makes the driver_data field mandatory
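For orientation, a device ID table that relies on those defaults might look like the sketch below. It is not taken from this commit; the vendor/device numbers are placeholders and driver_data is given an invented meaning here (an index into a per-board info array).

#include <linux/module.h>
#include <linux/pci.h>

/* driver_data indexes a hypothetical per-board info array */
static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(0x1af4, 0x1000), .driver_data = 0 },	/* placeholder IDs */
	{ PCI_DEVICE(0x1af4, 0x1041), .driver_data = 1 },	/* placeholder IDs */
	{ 0, }	/* terminator; omitted fields take the defaults listed above */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);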
@@ -19466,6 +19466,7 @@ T:	git git://github.com/awilliam/linux-vfio.git
F:	Documentation/driver-api/vfio.rst
F:	drivers/vfio/
F:	include/linux/vfio.h
F:	include/linux/vfio_pci_core.h
F:	include/uapi/linux/vfio.h

VFIO FSL-MC DRIVER
@@ -798,14 +798,12 @@ struct kvm_s390_cpu_model {
	unsigned short ibc;
};

struct kvm_s390_module_hook {
	int (*hook)(struct kvm_vcpu *vcpu);
	struct module *owner;
};
typedef int (*crypto_hook)(struct kvm_vcpu *vcpu);

struct kvm_s390_crypto {
	struct kvm_s390_crypto_cb *crycb;
	struct kvm_s390_module_hook *pqap_hook;
	struct rw_semaphore pqap_hook_rwsem;
	crypto_hook *pqap_hook;
	__u32 crycbd;
	__u8 aes_kw;
	__u8 dea_kw;
@@ -2559,12 +2559,26 @@ static void kvm_s390_set_crycb_format(struct kvm *kvm)
	kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

/*
 * kvm_arch_crypto_set_masks
 *
 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
 *       to be set.
 * @apm: the mask identifying the accessible AP adapters
 * @aqm: the mask identifying the accessible AP domains
 * @adm: the mask identifying the accessible AP control domains
 *
 * Set the masks that identify the adapters, domains and control domains to
 * which the KVM guest is granted access.
 *
 * Note: The kvm->lock mutex must be locked by the caller before invoking this
 *       function.
 */
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
@@ -2595,13 +2609,23 @@ void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);

/*
 * kvm_arch_crypto_clear_masks
 *
 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
 *       to be cleared.
 *
 * Clear the masks that identify the adapters, domains and control domains to
 * which the KVM guest is granted access.
 *
 * Note: The kvm->lock mutex must be locked by the caller before invoking this
 *       function.
 */
void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	memset(&kvm->arch.crypto.crycb->apcb0, 0,
@@ -2613,7 +2637,6 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm)
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);

@@ -2630,6 +2653,7 @@ static void kvm_s390_crypto_init(struct kvm *kvm)
{
	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);
	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);

	if (!test_kvm_facility(kvm, 76))
		return;
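As a quick orientation for readers outside the s390 tree, the sketch below shows how a consumer such as vfio_ap is expected to drive this pair of APIs once a guest attaches. It is not part of the diff: the function names and the apm/aqm/adm parameters are invented for illustration, and it assumes the behaviour introduced by this series where both functions take kvm->lock internally, so the caller no longer does.

#include <linux/kvm_host.h>

/* apm/aqm/adm are bitmaps sized for the machine's AP configuration */
static void example_grant_ap_access(struct kvm *kvm, unsigned long *apm,
				    unsigned long *aqm, unsigned long *adm)
{
	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);	/* takes kvm->lock itself */
}

static void example_revoke_ap_access(struct kvm *kvm)
{
	kvm_arch_crypto_clear_masks(kvm);		/* likewise takes kvm->lock */
}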
@@ -610,6 +610,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	struct ap_queue_status status = {};
	crypto_hook pqap_hook;
	unsigned long reg0;
	int ret;
	uint8_t fc;
@@ -654,18 +655,20 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/*
	 * Verify that the hook callback is registered, lock the owner
	 * and call the hook.
	 * If the hook callback is registered, there will be a pointer to the
	 * hook function pointer in the kvm_s390_crypto structure. Lock the
	 * owner, retrieve the hook function pointer and call the hook.
	 */
	down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
	if (vcpu->kvm->arch.crypto.pqap_hook) {
		if (!try_module_get(vcpu->kvm->arch.crypto.pqap_hook->owner))
			return -EOPNOTSUPP;
		ret = vcpu->kvm->arch.crypto.pqap_hook->hook(vcpu);
		module_put(vcpu->kvm->arch.crypto.pqap_hook->owner);
		pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
		ret = pqap_hook(vcpu);
		if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
			kvm_s390_set_psw_cc(vcpu, 3);
		up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
		return ret;
	}
	up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
	/*
	 * A vfio_driver must register a hook.
	 * No hook means no driver to enable the SIE CRYCB and no queues.
@@ -136,7 +136,7 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
						    struct pci_dev *dev)
{
	struct pci_dynid *dynid;
	const struct pci_device_id *found_id = NULL;
	const struct pci_device_id *found_id = NULL, *ids;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
@@ -152,14 +152,28 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
	}
	spin_unlock(&drv->dynids.lock);

	if (!found_id)
		found_id = pci_match_id(drv->id_table, dev);
	if (found_id)
		return found_id;

	for (ids = drv->id_table; (found_id = pci_match_id(ids, dev));
	     ids = found_id + 1) {
		/*
		 * The match table is split based on driver_override.
		 * In case override_only was set, enforce driver_override
		 * matching.
		 */
		if (found_id->override_only) {
			if (dev->driver_override)
				return found_id;
		} else {
			return found_id;
		}
	}

	/* driver_override will always match, send a dummy id */
	if (!found_id && dev->driver_override)
		found_id = &pci_device_id_any;

	return found_id;
	if (dev->driver_override)
		return &pci_device_id_any;
	return NULL;
}

/**
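To make the new matching rule concrete, here is a hedged sketch, not taken from this diff, of how a vendor-specific vfio-pci variant driver is expected to declare its ID table. It assumes the PCI_DRIVER_OVERRIDE_DEVICE_VFIO() helper and PCI_ID_F_VFIO_DRIVER_OVERRIDE flag added alongside this series; the 0x15b3/0x101e IDs are placeholders. Per the pci_match_device() logic above, such an entry matches only after userspace has written the driver's name into the device's driver_override attribute.

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_vfio_pci_table[] = {
	/* override_only entry: never auto-binds, needs driver_override */
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(0x15b3, 0x101e) },
	{}
};
MODULE_DEVICE_TABLE(pci, example_vfio_pci_table);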
@@ -24,8 +24,9 @@
#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"

static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;

static int match_apqn(struct device *dev, const void *data)
{
@@ -294,15 +295,6 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
	matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
				   struct ap_matrix_mdev, pqap_hook);

	/*
	 * If the KVM pointer is in the process of being set, wait until the
	 * process has completed.
	 */
	wait_event_cmd(matrix_mdev->wait_for_kvm,
		       !matrix_mdev->kvm_busy,
		       mutex_unlock(&matrix_dev->lock),
		       mutex_lock(&matrix_dev->lock));

	/* If the there is no guest using the mdev, there is nothing to do */
	if (!matrix_mdev->kvm)
		goto out_unlock;
@@ -335,45 +327,57 @@ static void vfio_ap_matrix_init(struct ap_config_info *info,
	matrix->adm_max = info->apxa ? info->Nd : 15;
}

static int vfio_ap_mdev_create(struct mdev_device *mdev)
static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev;
	int ret;

	if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
		return -EPERM;

	matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
	if (!matrix_mdev) {
		atomic_inc(&matrix_dev->available_instances);
		return -ENOMEM;
		ret = -ENOMEM;
		goto err_dec_available;
	}
	vfio_init_group_dev(&matrix_mdev->vdev, &mdev->dev,
			    &vfio_ap_matrix_dev_ops);

	matrix_mdev->mdev = mdev;
	vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
	init_waitqueue_head(&matrix_mdev->wait_for_kvm);
	mdev_set_drvdata(mdev, matrix_mdev);
	matrix_mdev->pqap_hook.hook = handle_pqap;
	matrix_mdev->pqap_hook.owner = THIS_MODULE;
	matrix_mdev->pqap_hook = handle_pqap;
	mutex_lock(&matrix_dev->lock);
	list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
	mutex_unlock(&matrix_dev->lock);

	ret = vfio_register_group_dev(&matrix_mdev->vdev);
	if (ret)
		goto err_list;
	dev_set_drvdata(&mdev->dev, matrix_mdev);
	return 0;

err_list:
	mutex_lock(&matrix_dev->lock);
	list_del(&matrix_mdev->node);
	mutex_unlock(&matrix_dev->lock);
	kfree(matrix_mdev);
err_dec_available:
	atomic_inc(&matrix_dev->available_instances);
	return ret;
}

static int vfio_ap_mdev_remove(struct mdev_device *mdev)
static void vfio_ap_mdev_remove(struct mdev_device *mdev)
{
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);

	vfio_unregister_group_dev(&matrix_mdev->vdev);

	mutex_lock(&matrix_dev->lock);
	vfio_ap_mdev_reset_queues(mdev);
	vfio_ap_mdev_reset_queues(matrix_mdev);
	list_del(&matrix_mdev->node);
	kfree(matrix_mdev);
	mdev_set_drvdata(mdev, NULL);
	atomic_inc(&matrix_dev->available_instances);
	mutex_unlock(&matrix_dev->lock);

	return 0;
}

static ssize_t name_show(struct mdev_type *mtype,
@@ -615,16 +619,12 @@ static ssize_t assign_adapter_store(struct device *dev,
{
	int ret;
	unsigned long apid;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->lock);

	/*
	 * If the KVM pointer is in flux or the guest is running, disallow
	 * un-assignment of adapter
	 */
	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
	/* If the KVM guest is running, disallow assignment of adapter */
	if (matrix_mdev->kvm) {
		ret = -EBUSY;
		goto done;
	}
@@ -688,16 +688,12 @@ static ssize_t unassign_adapter_store(struct device *dev,
{
	int ret;
	unsigned long apid;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->lock);

	/*
	 * If the KVM pointer is in flux or the guest is running, disallow
	 * un-assignment of adapter
	 */
	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
	/* If the KVM guest is running, disallow unassignment of adapter */
	if (matrix_mdev->kvm) {
		ret = -EBUSY;
		goto done;
	}
@@ -777,17 +773,13 @@ static ssize_t assign_domain_store(struct device *dev,
{
	int ret;
	unsigned long apqi;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	unsigned long max_apqi = matrix_mdev->matrix.aqm_max;

	mutex_lock(&matrix_dev->lock);

	/*
	 * If the KVM pointer is in flux or the guest is running, disallow
	 * assignment of domain
	 */
	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
	/* If the KVM guest is running, disallow assignment of domain */
	if (matrix_mdev->kvm) {
		ret = -EBUSY;
		goto done;
	}
@@ -846,16 +838,12 @@ static ssize_t unassign_domain_store(struct device *dev,
{
	int ret;
	unsigned long apqi;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->lock);

	/*
	 * If the KVM pointer is in flux or the guest is running, disallow
	 * un-assignment of domain
	 */
	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
	/* If the KVM guest is running, disallow unassignment of domain */
	if (matrix_mdev->kvm) {
		ret = -EBUSY;
		goto done;
	}
@@ -900,16 +888,12 @@ static ssize_t assign_control_domain_store(struct device *dev,
{
	int ret;
	unsigned long id;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);

	mutex_lock(&matrix_dev->lock);

	/*
	 * If the KVM pointer is in flux or the guest is running, disallow
	 * assignment of control domain.
	 */
	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
	/* If the KVM guest is running, disallow assignment of control domain */
	if (matrix_mdev->kvm) {
		ret = -EBUSY;
		goto done;
	}
@@ -958,17 +942,13 @@ static ssize_t unassign_control_domain_store(struct device *dev,
{
	int ret;
	unsigned long domid;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	unsigned long max_domid = matrix_mdev->matrix.adm_max;

	mutex_lock(&matrix_dev->lock);

	/*
	 * If the KVM pointer is in flux or the guest is running, disallow
	 * un-assignment of control domain.
	 */
	if (matrix_mdev->kvm_busy || matrix_mdev->kvm) {
	/* If a KVM guest is running, disallow unassignment of control domain */
	if (matrix_mdev->kvm) {
		ret = -EBUSY;
		goto done;
	}
@@ -997,8 +977,7 @@ static ssize_t control_domains_show(struct device *dev,
	int nchars = 0;
	int n;
	char *bufpos = buf;
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	unsigned long max_domid = matrix_mdev->matrix.adm_max;

	mutex_lock(&matrix_dev->lock);
@@ -1016,8 +995,7 @@ static DEVICE_ATTR_RO(control_domains);
static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
	char *bufpos = buf;
	unsigned long apid;
	unsigned long apqi;
@@ -1109,23 +1087,30 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
	struct ap_matrix_mdev *m;

	if (kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

		mutex_lock(&kvm->lock);
		mutex_lock(&matrix_dev->lock);

		list_for_each_entry(m, &matrix_dev->mdev_list, node) {
			if (m != matrix_mdev && m->kvm == kvm)
			if (m != matrix_mdev && m->kvm == kvm) {
				mutex_unlock(&kvm->lock);
				mutex_unlock(&matrix_dev->lock);
				return -EPERM;
			}
		}

		kvm_get_kvm(kvm);
		matrix_mdev->kvm_busy = true;
		mutex_unlock(&matrix_dev->lock);
		matrix_mdev->kvm = kvm;
		kvm_arch_crypto_set_masks(kvm,
					  matrix_mdev->matrix.apm,
					  matrix_mdev->matrix.aqm,
					  matrix_mdev->matrix.adm);
		mutex_lock(&matrix_dev->lock);
		kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
		matrix_mdev->kvm = kvm;
		matrix_mdev->kvm_busy = false;
		wake_up_all(&matrix_mdev->wait_for_kvm);

		mutex_unlock(&kvm->lock);
		mutex_unlock(&matrix_dev->lock);
	}

	return 0;
@@ -1175,28 +1160,24 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
 * done under the @matrix_mdev->lock.
 *
 */
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev,
				   struct kvm *kvm)
{
	/*
	 * If the KVM pointer is in the process of being set, wait until the
	 * process has completed.
	 */
	wait_event_cmd(matrix_mdev->wait_for_kvm,
		       !matrix_mdev->kvm_busy,
		       mutex_unlock(&matrix_dev->lock),
		       mutex_lock(&matrix_dev->lock));
	if (kvm && kvm->arch.crypto.crycbd) {
		down_write(&kvm->arch.crypto.pqap_hook_rwsem);
		kvm->arch.crypto.pqap_hook = NULL;
		up_write(&kvm->arch.crypto.pqap_hook_rwsem);

	if (matrix_mdev->kvm) {
		matrix_mdev->kvm_busy = true;
		mutex_unlock(&matrix_dev->lock);
		kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
		mutex_lock(&kvm->lock);
		mutex_lock(&matrix_dev->lock);
		vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
		matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
		kvm_put_kvm(matrix_mdev->kvm);

		kvm_arch_crypto_clear_masks(kvm);
		vfio_ap_mdev_reset_queues(matrix_mdev);
		kvm_put_kvm(kvm);
		matrix_mdev->kvm = NULL;
		matrix_mdev->kvm_busy = false;
		wake_up_all(&matrix_mdev->wait_for_kvm);

		mutex_unlock(&kvm->lock);
		mutex_unlock(&matrix_dev->lock);
	}
}

@@ -1209,16 +1190,13 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
	if (action != VFIO_GROUP_NOTIFY_SET_KVM)
		return NOTIFY_OK;

	mutex_lock(&matrix_dev->lock);
	matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);

	if (!data)
		vfio_ap_mdev_unset_kvm(matrix_mdev);
		vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
	else if (vfio_ap_mdev_set_kvm(matrix_mdev, data))
		notify_rc = NOTIFY_DONE;

	mutex_unlock(&matrix_dev->lock);

	return notify_rc;
}

@@ -1288,13 +1266,12 @@ free_resources:
	return ret;
}

static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
{
	int ret;
	int rc = 0;
	unsigned long apid, apqi;
	struct vfio_ap_queue *q;
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);

	for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
			     matrix_mdev->matrix.apm_max + 1) {
@@ -1315,52 +1292,45 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
	return rc;
}

static int vfio_ap_mdev_open_device(struct mdev_device *mdev)
static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);
	unsigned long events;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
	events = VFIO_GROUP_NOTIFY_SET_KVM;

	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
	ret = vfio_register_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
				     &events, &matrix_mdev->group_notifier);
	if (ret) {
		module_put(THIS_MODULE);
	if (ret)
		return ret;
	}

	matrix_mdev->iommu_notifier.notifier_call = vfio_ap_mdev_iommu_notifier;
	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
	ret = vfio_register_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
				     &events, &matrix_mdev->iommu_notifier);
	if (!ret)
		return ret;
	if (ret)
		goto out_unregister_group;
	return 0;

	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
out_unregister_group:
	vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
				 &matrix_mdev->group_notifier);
	module_put(THIS_MODULE);
	return ret;
}

static void vfio_ap_mdev_close_device(struct mdev_device *mdev)
static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
{
	struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);

	mutex_lock(&matrix_dev->lock);
	vfio_ap_mdev_unset_kvm(matrix_mdev);
	mutex_unlock(&matrix_dev->lock);

	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
	vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
				 &matrix_mdev->iommu_notifier);
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
	vfio_unregister_notifier(vdev->dev, VFIO_GROUP_NOTIFY,
				 &matrix_mdev->group_notifier);
	module_put(THIS_MODULE);
	vfio_ap_mdev_unset_kvm(matrix_mdev, matrix_mdev->kvm);
}

static int vfio_ap_mdev_get_device_info(unsigned long arg)
@@ -1383,11 +1353,12 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg)
	return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}

static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
				  unsigned int cmd, unsigned long arg)
{
	struct ap_matrix_mdev *matrix_mdev =
		container_of(vdev, struct ap_matrix_mdev, vdev);
	int ret;
	struct ap_matrix_mdev *matrix_mdev;

	mutex_lock(&matrix_dev->lock);
	switch (cmd) {
@@ -1395,22 +1366,7 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
		ret = vfio_ap_mdev_get_device_info(arg);
		break;
	case VFIO_DEVICE_RESET:
		matrix_mdev = mdev_get_drvdata(mdev);
		if (WARN(!matrix_mdev, "Driver data missing from mdev!!")) {
			ret = -EINVAL;
			break;
		}

		/*
		 * If the KVM pointer is in the process of being set, wait until
		 * the process has completed.
		 */
		wait_event_cmd(matrix_mdev->wait_for_kvm,
			       !matrix_mdev->kvm_busy,
			       mutex_unlock(&matrix_dev->lock),
			       mutex_lock(&matrix_dev->lock));

		ret = vfio_ap_mdev_reset_queues(mdev);
		ret = vfio_ap_mdev_reset_queues(matrix_mdev);
		break;
	default:
		ret = -EOPNOTSUPP;
@@ -1421,25 +1377,51 @@ static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
	return ret;
}

static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
	.open_device = vfio_ap_mdev_open_device,
	.close_device = vfio_ap_mdev_close_device,
	.ioctl = vfio_ap_mdev_ioctl,
};

static struct mdev_driver vfio_ap_matrix_driver = {
	.driver = {
		.name = "vfio_ap_mdev",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = vfio_ap_mdev_attr_groups,
	},
	.probe = vfio_ap_mdev_probe,
	.remove = vfio_ap_mdev_remove,
};

static const struct mdev_parent_ops vfio_ap_matrix_ops = {
	.owner = THIS_MODULE,
	.device_driver = &vfio_ap_matrix_driver,
	.supported_type_groups = vfio_ap_mdev_type_groups,
	.mdev_attr_groups = vfio_ap_mdev_attr_groups,
	.create = vfio_ap_mdev_create,
	.remove = vfio_ap_mdev_remove,
	.open_device = vfio_ap_mdev_open_device,
	.close_device = vfio_ap_mdev_close_device,
	.ioctl = vfio_ap_mdev_ioctl,
};

int vfio_ap_mdev_register(void)
{
	int ret;

	atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);

	return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
	ret = mdev_register_driver(&vfio_ap_matrix_driver);
	if (ret)
		return ret;

	ret = mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
	if (ret)
		goto err_driver;
	return 0;

err_driver:
	mdev_unregister_driver(&vfio_ap_matrix_driver);
	return ret;
}

void vfio_ap_mdev_unregister(void)
{
	mdev_unregister_device(&matrix_dev->device);
	mdev_unregister_driver(&vfio_ap_matrix_driver);
}
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>

#include "ap_bus.h"

@@ -79,14 +80,13 @@ struct ap_matrix {
 * @kvm: the struct holding guest's state
 */
struct ap_matrix_mdev {
	struct vfio_device vdev;
	struct list_head node;
	struct ap_matrix matrix;
	struct notifier_block group_notifier;
	struct notifier_block iommu_notifier;
	bool kvm_busy;
	wait_queue_head_t wait_for_kvm;
	struct kvm *kvm;
	struct kvm_s390_module_hook pqap_hook;
	crypto_hook pqap_hook;
	struct mdev_device *mdev;
};

@@ -1,24 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
config VFIO_IOMMU_TYPE1
	tristate
	depends on VFIO
	default n

config VFIO_IOMMU_SPAPR_TCE
	tristate
	depends on VFIO && SPAPR_TCE_IOMMU
	default VFIO

config VFIO_SPAPR_EEH
	tristate
	depends on EEH && VFIO_IOMMU_SPAPR_TCE
	default VFIO

config VFIO_VIRQFD
	tristate
	depends on VFIO && EVENTFD
	default n

menuconfig VFIO
	tristate "VFIO Non-Privileged userspace driver framework"
	select IOMMU_API
@@ -29,9 +9,28 @@ menuconfig VFIO

	  If you don't know what to do here, say N.

if VFIO
config VFIO_IOMMU_TYPE1
	tristate
	default n

config VFIO_IOMMU_SPAPR_TCE
	tristate
	depends on SPAPR_TCE_IOMMU
	default VFIO

config VFIO_SPAPR_EEH
	tristate
	depends on EEH && VFIO_IOMMU_SPAPR_TCE
	default VFIO

config VFIO_VIRQFD
	tristate
	select EVENTFD
	default n

config VFIO_NOIOMMU
	bool "VFIO No-IOMMU support"
	depends on VFIO
	help
	  VFIO is built on the ability to isolate devices using the IOMMU.
	  Only with an IOMMU can userspace access to DMA capable devices be
@@ -48,4 +47,6 @@ source "drivers/vfio/pci/Kconfig"
source "drivers/vfio/platform/Kconfig"
source "drivers/vfio/mdev/Kconfig"
source "drivers/vfio/fsl-mc/Kconfig"
endif

source "virt/lib/Kconfig"
@@ -1,6 +1,7 @@
config VFIO_FSL_MC
	tristate "VFIO support for QorIQ DPAA2 fsl-mc bus devices"
	depends on VFIO && FSL_MC_BUS && EVENTFD
	depends on FSL_MC_BUS
	select EVENTFD
	help
	  Driver to enable support for the VFIO QorIQ DPAA2 fsl-mc
	  (Management Complex) devices. This is required to passthrough
@@ -2,7 +2,6 @@

config VFIO_MDEV
	tristate "Mediated device driver framework"
	depends on VFIO
	default n
	help
	  Provides a framework to virtualize devices.
@@ -1,19 +1,29 @@
# SPDX-License-Identifier: GPL-2.0-only
config VFIO_PCI
	tristate "VFIO support for PCI devices"
	depends on VFIO && PCI && EVENTFD
	depends on MMU
if PCI && MMU
config VFIO_PCI_CORE
	tristate
	select VFIO_VIRQFD
	select IRQ_BYPASS_MANAGER

config VFIO_PCI_MMAP
	def_bool y if !S390

config VFIO_PCI_INTX
	def_bool y if !S390

config VFIO_PCI
	tristate "Generic VFIO support for any PCI device"
	select VFIO_PCI_CORE
	help
	  Support for the PCI VFIO bus driver. This is required to make
	  use of PCI drivers using the VFIO framework.
	  Support for the generic PCI VFIO bus driver which can connect any
	  PCI device to the VFIO framework.

	  If you don't know what to do here, say N.

if VFIO_PCI
config VFIO_PCI_VGA
	bool "VFIO PCI support for VGA devices"
	depends on VFIO_PCI && X86 && VGA_ARB
	bool "Generic VFIO PCI support for VGA devices"
	depends on X86 && VGA_ARB
	help
	  Support for VGA extension to VFIO PCI. This exposes an additional
	  region on VGA devices for accessing legacy VGA addresses used by
@@ -21,17 +31,9 @@ config VFIO_PCI_VGA

	  If you don't know what to do here, say N.

config VFIO_PCI_MMAP
	depends on VFIO_PCI
	def_bool y if !S390

config VFIO_PCI_INTX
	depends on VFIO_PCI
	def_bool y if !S390

config VFIO_PCI_IGD
	bool "VFIO PCI extensions for Intel graphics (GVT-d)"
	depends on VFIO_PCI && X86
	bool "Generic VFIO PCI extensions for Intel graphics (GVT-d)"
	depends on X86
	default y
	help
	  Support for Intel IGD specific extensions to enable direct
@@ -40,3 +42,5 @@ config VFIO_PCI_IGD
	  and LPC bridge config space.

	  To enable Intel IGD assignment through vfio-pci, say Y.
endif
endif
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only

vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
vfio-pci-$(CONFIG_S390) += vfio_pci_zdev.o
vfio-pci-core-y := vfio_pci_core.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
vfio-pci-core-$(CONFIG_S390) += vfio_pci_zdev.o
obj-$(CONFIG_VFIO_PCI_CORE) += vfio-pci-core.o

vfio-pci-y := vfio_pci.o
vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
obj-$(CONFIG_VFIO_PCI) += vfio-pci.o

File diff suppressed because it is too large
@ -26,7 +26,7 @@
|
||||
#include <linux/vfio.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "vfio_pci_private.h"
|
||||
#include <linux/vfio_pci_core.h>
|
||||
|
||||
/* Fake capability ID for standard config space */
|
||||
#define PCI_CAP_ID_BASIC 0
|
||||
@ -108,9 +108,9 @@ static const u16 pci_ext_cap_length[PCI_EXT_CAP_ID_MAX + 1] = {
|
||||
struct perm_bits {
|
||||
u8 *virt; /* read/write virtual data, not hw */
|
||||
u8 *write; /* writeable bits */
|
||||
int (*readfn)(struct vfio_pci_device *vdev, int pos, int count,
|
||||
int (*readfn)(struct vfio_pci_core_device *vdev, int pos, int count,
|
||||
struct perm_bits *perm, int offset, __le32 *val);
|
||||
int (*writefn)(struct vfio_pci_device *vdev, int pos, int count,
|
||||
int (*writefn)(struct vfio_pci_core_device *vdev, int pos, int count,
|
||||
struct perm_bits *perm, int offset, __le32 val);
|
||||
};
|
||||
|
||||
@ -171,7 +171,7 @@ static int vfio_user_config_write(struct pci_dev *pdev, int offset,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vfio_default_config_read(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_default_config_read(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 *val)
|
||||
{
|
||||
@ -197,7 +197,7 @@ static int vfio_default_config_read(struct vfio_pci_device *vdev, int pos,
|
||||
return count;
|
||||
}
|
||||
|
||||
static int vfio_default_config_write(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_default_config_write(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 val)
|
||||
{
|
||||
@ -244,7 +244,7 @@ static int vfio_default_config_write(struct vfio_pci_device *vdev, int pos,
|
||||
}
|
||||
|
||||
/* Allow direct read from hardware, except for capability next pointer */
|
||||
static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_direct_config_read(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 *val)
|
||||
{
|
||||
@ -269,7 +269,7 @@ static int vfio_direct_config_read(struct vfio_pci_device *vdev, int pos,
|
||||
}
|
||||
|
||||
/* Raw access skips any kind of virtualization */
|
||||
static int vfio_raw_config_write(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_raw_config_write(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 val)
|
||||
{
|
||||
@ -282,7 +282,7 @@ static int vfio_raw_config_write(struct vfio_pci_device *vdev, int pos,
|
||||
return count;
|
||||
}
|
||||
|
||||
static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_raw_config_read(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 *val)
|
||||
{
|
||||
@ -296,7 +296,7 @@ static int vfio_raw_config_read(struct vfio_pci_device *vdev, int pos,
|
||||
}
|
||||
|
||||
/* Virt access uses only virtualization */
|
||||
static int vfio_virt_config_write(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_virt_config_write(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 val)
|
||||
{
|
||||
@ -304,7 +304,7 @@ static int vfio_virt_config_write(struct vfio_pci_device *vdev, int pos,
|
||||
return count;
|
||||
}
|
||||
|
||||
static int vfio_virt_config_read(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_virt_config_read(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 *val)
|
||||
{
|
||||
@ -396,7 +396,7 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
|
||||
}
|
||||
|
||||
/* Caller should hold memory_lock semaphore */
|
||||
bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
|
||||
bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
|
||||
@ -413,7 +413,7 @@ bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
|
||||
* Restore the *real* BARs after we detect a FLR or backdoor reset.
|
||||
* (backdoor = some device specific technique that we didn't catch)
|
||||
*/
|
||||
static void vfio_bar_restore(struct vfio_pci_device *vdev)
|
||||
static void vfio_bar_restore(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
u32 *rbar = vdev->rbar;
|
||||
@ -460,7 +460,7 @@ static __le32 vfio_generate_bar_flags(struct pci_dev *pdev, int bar)
|
||||
* Pretend we're hardware and tweak the values of the *virtual* PCI BARs
|
||||
* to reflect the hardware capabilities. This implements BAR sizing.
|
||||
*/
|
||||
static void vfio_bar_fixup(struct vfio_pci_device *vdev)
|
||||
static void vfio_bar_fixup(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
int i;
|
||||
@ -514,7 +514,7 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
|
||||
vdev->bardirty = false;
|
||||
}
|
||||
|
||||
static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_basic_config_read(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 *val)
|
||||
{
|
||||
@ -536,7 +536,7 @@ static int vfio_basic_config_read(struct vfio_pci_device *vdev, int pos,
|
||||
}
|
||||
|
||||
/* Test whether BARs match the value we think they should contain */
|
||||
static bool vfio_need_bar_restore(struct vfio_pci_device *vdev)
|
||||
static bool vfio_need_bar_restore(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
int i = 0, pos = PCI_BASE_ADDRESS_0, ret;
|
||||
u32 bar;
|
||||
@ -552,7 +552,7 @@ static bool vfio_need_bar_restore(struct vfio_pci_device *vdev)
|
||||
return false;
|
||||
}
|
||||
|
||||
static int vfio_basic_config_write(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_basic_config_write(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 val)
|
||||
{
|
||||
@ -692,7 +692,7 @@ static int __init init_pci_cap_basic_perm(struct perm_bits *perm)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_pm_config_write(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_pm_config_write(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 val)
|
||||
{
|
||||
@ -747,7 +747,7 @@ static int __init init_pci_cap_pm_perm(struct perm_bits *perm)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_vpd_config_write(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 val)
|
||||
{
|
||||
@ -829,7 +829,7 @@ static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_exp_config_write(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 val)
|
||||
{
|
||||
@ -913,7 +913,7 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_af_config_write(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 val)
|
||||
{
|
||||
@ -1072,7 +1072,7 @@ int __init vfio_pci_init_perm_bits(void)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos)
|
||||
static int vfio_find_cap_start(struct vfio_pci_core_device *vdev, int pos)
|
||||
{
|
||||
u8 cap;
|
||||
int base = (pos >= PCI_CFG_SPACE_SIZE) ? PCI_CFG_SPACE_SIZE :
|
||||
@ -1089,7 +1089,7 @@ static int vfio_find_cap_start(struct vfio_pci_device *vdev, int pos)
|
||||
return pos;
|
||||
}
|
||||
|
||||
static int vfio_msi_config_read(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_msi_config_read(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 *val)
|
||||
{
|
||||
@ -1109,7 +1109,7 @@ static int vfio_msi_config_read(struct vfio_pci_device *vdev, int pos,
|
||||
return vfio_default_config_read(vdev, pos, count, perm, offset, val);
|
||||
}
|
||||
|
||||
static int vfio_msi_config_write(struct vfio_pci_device *vdev, int pos,
|
||||
static int vfio_msi_config_write(struct vfio_pci_core_device *vdev, int pos,
|
||||
int count, struct perm_bits *perm,
|
||||
int offset, __le32 val)
|
||||
{
|
||||
@ -1189,7 +1189,7 @@ static int init_pci_cap_msi_perm(struct perm_bits *perm, int len, u16 flags)
|
||||
}
|
||||
|
||||
/* Determine MSI CAP field length; initialize msi_perms on 1st call per vdev */
|
||||
static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos)
|
||||
static int vfio_msi_cap_len(struct vfio_pci_core_device *vdev, u8 pos)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
int len, ret;
|
||||
@ -1222,7 +1222,7 @@ static int vfio_msi_cap_len(struct vfio_pci_device *vdev, u8 pos)
|
||||
}
|
||||
|
||||
/* Determine extended capability length for VC (2 & 9) and MFVC */
|
||||
static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
|
||||
static int vfio_vc_cap_len(struct vfio_pci_core_device *vdev, u16 pos)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
u32 tmp;
|
||||
@ -1263,7 +1263,7 @@ static int vfio_vc_cap_len(struct vfio_pci_device *vdev, u16 pos)
|
||||
return len;
|
||||
}
|
||||
|
||||
static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
|
||||
static int vfio_cap_len(struct vfio_pci_core_device *vdev, u8 cap, u8 pos)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
u32 dword;
|
||||
@ -1338,7 +1338,7 @@ static int vfio_cap_len(struct vfio_pci_device *vdev, u8 cap, u8 pos)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
|
||||
static int vfio_ext_cap_len(struct vfio_pci_core_device *vdev, u16 ecap, u16 epos)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
u8 byte;
|
||||
@ -1412,7 +1412,7 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_fill_vconfig_bytes(struct vfio_pci_device *vdev,
|
||||
static int vfio_fill_vconfig_bytes(struct vfio_pci_core_device *vdev,
|
||||
int offset, int size)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
@ -1459,7 +1459,7 @@ static int vfio_fill_vconfig_bytes(struct vfio_pci_device *vdev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vfio_cap_init(struct vfio_pci_device *vdev)
|
||||
static int vfio_cap_init(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
u8 *map = vdev->pci_config_map;
|
||||
@ -1549,7 +1549,7 @@ static int vfio_cap_init(struct vfio_pci_device *vdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_ecap_init(struct vfio_pci_device *vdev)
|
||||
static int vfio_ecap_init(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
u8 *map = vdev->pci_config_map;
|
||||
@ -1669,7 +1669,7 @@ static const struct pci_device_id known_bogus_vf_intx_pin[] = {
|
||||
* for each area requiring emulated bits, but the array of pointers
|
||||
* would be comparable in size (at least for standard config space).
|
||||
*/
|
||||
int vfio_config_init(struct vfio_pci_device *vdev)
|
||||
int vfio_config_init(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
u8 *map, *vconfig;
|
||||
@ -1773,7 +1773,7 @@ out:
|
||||
return pcibios_err_to_errno(ret);
|
||||
}
|
||||
|
||||
void vfio_config_free(struct vfio_pci_device *vdev)
|
||||
void vfio_config_free(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
kfree(vdev->vconfig);
|
||||
vdev->vconfig = NULL;
|
||||
@ -1790,7 +1790,7 @@ void vfio_config_free(struct vfio_pci_device *vdev)
|
||||
* Find the remaining number of bytes in a dword that match the given
|
||||
* position. Stop at either the end of the capability or the dword boundary.
|
||||
*/
|
||||
static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_device *vdev,
|
||||
static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_core_device *vdev,
|
||||
loff_t pos)
|
||||
{
|
||||
u8 cap = vdev->pci_config_map[pos];
|
||||
@ -1802,7 +1802,7 @@ static size_t vfio_pci_cap_remaining_dword(struct vfio_pci_device *vdev,
|
||||
return i;
|
||||
}
|
||||
|
||||
static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
|
||||
static ssize_t vfio_config_do_rw(struct vfio_pci_core_device *vdev, char __user *buf,
|
||||
size_t count, loff_t *ppos, bool iswrite)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
@ -1885,7 +1885,7 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_device *vdev, char __user *buf,
|
||||
return ret;
|
||||
}
|
||||
|
||||
ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev, char __user *buf,
|
||||
ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, char __user *buf,
|
||||
size_t count, loff_t *ppos, bool iswrite)
|
||||
{
|
||||
size_t done = 0;
|
||||
|
2158
drivers/vfio/pci/vfio_pci_core.c
Normal file
2158
drivers/vfio/pci/vfio_pci_core.c
Normal file
File diff suppressed because it is too large
Load Diff
@ -15,7 +15,7 @@
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/vfio.h>
|
||||
|
||||
#include "vfio_pci_private.h"
|
||||
#include <linux/vfio_pci_core.h>
|
||||
|
||||
#define OPREGION_SIGNATURE "IntelGraphicsMem"
|
||||
#define OPREGION_SIZE (8 * 1024)
|
||||
@ -25,8 +25,9 @@
|
||||
#define OPREGION_RVDS 0x3c2
|
||||
#define OPREGION_VERSION 0x16
|
||||
|
||||
static ssize_t vfio_pci_igd_rw(struct vfio_pci_device *vdev, char __user *buf,
|
||||
size_t count, loff_t *ppos, bool iswrite)
|
||||
static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
|
||||
char __user *buf, size_t count, loff_t *ppos,
|
||||
bool iswrite)
|
||||
{
|
||||
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
|
||||
void *base = vdev->region[i].data;
|
||||
@ -45,7 +46,7 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_device *vdev, char __user *buf,
|
||||
return count;
|
||||
}
|
||||
|
||||
static void vfio_pci_igd_release(struct vfio_pci_device *vdev,
|
||||
static void vfio_pci_igd_release(struct vfio_pci_core_device *vdev,
|
||||
struct vfio_pci_region *region)
|
||||
{
|
||||
memunmap(region->data);
|
||||
@ -56,7 +57,7 @@ static const struct vfio_pci_regops vfio_pci_igd_regops = {
|
||||
.release = vfio_pci_igd_release,
|
||||
};
|
||||
|
||||
static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev)
|
||||
static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
__le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
|
||||
u32 addr, size;
|
||||
@ -160,7 +161,7 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_device *vdev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev,
|
||||
static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
|
||||
char __user *buf, size_t count, loff_t *ppos,
|
||||
bool iswrite)
|
||||
{
|
||||
@ -253,7 +254,7 @@ static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_device *vdev,
|
||||
return count;
|
||||
}
|
||||
|
||||
static void vfio_pci_igd_cfg_release(struct vfio_pci_device *vdev,
|
||||
static void vfio_pci_igd_cfg_release(struct vfio_pci_core_device *vdev,
|
||||
struct vfio_pci_region *region)
|
||||
{
|
||||
struct pci_dev *pdev = region->data;
|
||||
@ -266,7 +267,7 @@ static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
|
||||
.release = vfio_pci_igd_cfg_release,
|
||||
};
|
||||
|
||||
static int vfio_pci_igd_cfg_init(struct vfio_pci_device *vdev)
|
||||
static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
struct pci_dev *host_bridge, *lpc_bridge;
|
||||
int ret;
|
||||
@ -314,7 +315,7 @@ static int vfio_pci_igd_cfg_init(struct vfio_pci_device *vdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int vfio_pci_igd_init(struct vfio_pci_device *vdev)
|
||||
int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
|
@ -20,20 +20,20 @@
|
||||
#include <linux/wait.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "vfio_pci_private.h"
|
||||
#include <linux/vfio_pci_core.h>
|
||||
|
||||
/*
|
||||
* INTx
|
||||
*/
|
||||
static void vfio_send_intx_eventfd(void *opaque, void *unused)
|
||||
{
|
||||
struct vfio_pci_device *vdev = opaque;
|
||||
struct vfio_pci_core_device *vdev = opaque;
|
||||
|
||||
if (likely(is_intx(vdev) && !vdev->virq_disabled))
|
||||
eventfd_signal(vdev->ctx[0].trigger, 1);
|
||||
}
|
||||
|
||||
void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
|
||||
void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
unsigned long flags;
|
||||
@ -73,7 +73,7 @@ void vfio_pci_intx_mask(struct vfio_pci_device *vdev)
|
||||
*/
|
||||
static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
|
||||
{
|
||||
struct vfio_pci_device *vdev = opaque;
|
||||
struct vfio_pci_core_device *vdev = opaque;
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
@ -107,7 +107,7 @@ static int vfio_pci_intx_unmask_handler(void *opaque, void *unused)
|
||||
return ret;
|
||||
}
|
||||
|
||||
void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
|
||||
void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
|
||||
vfio_send_intx_eventfd(vdev, NULL);
|
||||
@ -115,7 +115,7 @@ void vfio_pci_intx_unmask(struct vfio_pci_device *vdev)
|
||||
|
||||
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
|
||||
{
|
||||
struct vfio_pci_device *vdev = dev_id;
|
||||
struct vfio_pci_core_device *vdev = dev_id;
|
||||
unsigned long flags;
|
||||
int ret = IRQ_NONE;
|
||||
|
||||
@ -139,7 +139,7 @@ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vfio_intx_enable(struct vfio_pci_device *vdev)
|
||||
static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
if (!is_irq_none(vdev))
|
||||
return -EINVAL;
|
||||
@ -168,7 +168,7 @@ static int vfio_intx_enable(struct vfio_pci_device *vdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
|
||||
static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
unsigned long irqflags = IRQF_SHARED;
|
||||
@ -223,7 +223,7 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vfio_intx_disable(struct vfio_pci_device *vdev)
|
||||
static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
|
||||
{
|
||||
vfio_virqfd_disable(&vdev->ctx[0].unmask);
|
||||
vfio_virqfd_disable(&vdev->ctx[0].mask);
|
||||
@ -244,7 +244,7 @@ static irqreturn_t vfio_msihandler(int irq, void *arg)
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
|
||||
static int vfio_msi_enable(struct vfio_pci_core_device *vdev, int nvec, bool msix)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
unsigned int flag = msix ? PCI_IRQ_MSIX : PCI_IRQ_MSI;
|
||||
@ -285,7 +285,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
|
||||
static int vfio_msi_set_vector_signal(struct vfio_pci_core_device *vdev,
|
||||
int vector, int fd, bool msix)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
@ -364,7 +364,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
|
||||
static int vfio_msi_set_block(struct vfio_pci_core_device *vdev, unsigned start,
|
||||
unsigned count, int32_t *fds, bool msix)
|
||||
{
|
||||
int i, j, ret = 0;
|
||||
@ -385,7 +385,7 @@ static int vfio_msi_set_block(struct vfio_pci_device *vdev, unsigned start,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
|
||||
static void vfio_msi_disable(struct vfio_pci_core_device *vdev, bool msix)
|
||||
{
|
||||
struct pci_dev *pdev = vdev->pdev;
|
||||
int i;
|
||||
@ -417,7 +417,7 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
|
||||
/*
|
||||
* IOCTL support
|
||||
*/
|
||||
static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
|
||||
static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
|
||||
unsigned index, unsigned start,
|
||||
unsigned count, uint32_t flags, void *data)
|
||||
{
|
||||
@ -444,7 +444,7 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
|
||||
static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
|
||||
unsigned index, unsigned start,
|
||||
unsigned count, uint32_t flags, void *data)
|
||||
{
|
||||
@ -464,7 +464,7 @@ static int vfio_pci_set_intx_mask(struct vfio_pci_device *vdev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
|
||||
static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
|
||||
unsigned index, unsigned start,
|
||||
unsigned count, uint32_t flags, void *data)
|
||||
{
|
||||
@ -507,7 +507,7 @@ static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
|
||||
static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
|
||||
unsigned index, unsigned start,
|
||||
unsigned count, uint32_t flags, void *data)
|
||||
{
|
||||
@ -613,7 +613,7 @@ static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
|
||||
static int vfio_pci_set_err_trigger(struct vfio_pci_core_device *vdev,
|
||||
unsigned index, unsigned start,
|
||||
unsigned count, uint32_t flags, void *data)
|
||||
{
|
||||
@ -624,7 +624,7 @@ static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
|
||||
count, flags, data);
|
||||
}
|
||||
|
||||
static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
|
||||
static int vfio_pci_set_req_trigger(struct vfio_pci_core_device *vdev,
|
||||
unsigned index, unsigned start,
|
||||
unsigned count, uint32_t flags, void *data)
|
||||
{
|
||||
@ -635,11 +635,11 @@ static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
|
||||
count, flags, data);
|
||||
}
|
||||
|
||||
int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
|
||||
int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, uint32_t flags,
|
||||
unsigned index, unsigned start, unsigned count,
|
||||
void *data)
|
||||
{
|
||||
int (*func)(struct vfio_pci_device *vdev, unsigned index,
|
||||
int (*func)(struct vfio_pci_core_device *vdev, unsigned index,
|
||||
unsigned start, unsigned count, uint32_t flags,
|
||||
void *data) = NULL;
|
||||
|
||||
|
@ -17,7 +17,7 @@
|
||||
#include <linux/vfio.h>
|
||||
#include <linux/vgaarb.h>
|
||||
|
||||
#include "vfio_pci_private.h"
|
||||
#include <linux/vfio_pci_core.h>
|
||||
|
||||
#ifdef __LITTLE_ENDIAN
|
||||
#define vfio_ioread64 ioread64
|
||||
@ -38,7 +38,7 @@
|
||||
#define vfio_iowrite8 iowrite8
|
||||
|
||||
#define VFIO_IOWRITE(size) \
|
||||
static int vfio_pci_iowrite##size(struct vfio_pci_device *vdev, \
|
||||
static int vfio_pci_iowrite##size(struct vfio_pci_core_device *vdev, \
|
||||
bool test_mem, u##size val, void __iomem *io) \
|
||||
{ \
|
||||
if (test_mem) { \
|
||||
@ -65,7 +65,7 @@ VFIO_IOWRITE(64)
|
||||
#endif
|
||||
|
||||
#define VFIO_IOREAD(size) \
|
||||
static int vfio_pci_ioread##size(struct vfio_pci_device *vdev, \
|
||||
static int vfio_pci_ioread##size(struct vfio_pci_core_device *vdev, \
|
||||
bool test_mem, u##size *val, void __iomem *io) \
|
||||
{ \
|
||||
if (test_mem) { \
|
||||
@ -94,7 +94,7 @@ VFIO_IOREAD(32)
* reads with -1. This is intended for handling MSI-X vector tables and
* leftover space for ROM BARs.
*/
static ssize_t do_io_rw(struct vfio_pci_device *vdev, bool test_mem,
static ssize_t do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
void __iomem *io, char __user *buf,
loff_t off, size_t count, size_t x_start,
size_t x_end, bool iswrite)
@ -200,7 +200,7 @@ static ssize_t do_io_rw(struct vfio_pci_device *vdev, bool test_mem,
return done;
}

static int vfio_pci_setup_barmap(struct vfio_pci_device *vdev, int bar)
static int vfio_pci_setup_barmap(struct vfio_pci_core_device *vdev, int bar)
{
struct pci_dev *pdev = vdev->pdev;
int ret;
@ -224,7 +224,7 @@ static int vfio_pci_setup_barmap(struct vfio_pci_device *vdev, int bar)
return 0;
}

ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
struct pci_dev *pdev = vdev->pdev;
@ -288,7 +288,7 @@ out:
return done;
}

ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite)
{
int ret;
@ -384,7 +384,7 @@ static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd,
static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
{
struct vfio_pci_ioeventfd *ioeventfd = opaque;
struct vfio_pci_device *vdev = ioeventfd->vdev;
struct vfio_pci_core_device *vdev = ioeventfd->vdev;

if (ioeventfd->test_mem) {
if (!down_read_trylock(&vdev->memory_lock))
@ -410,7 +410,7 @@ static void vfio_pci_ioeventfd_thread(void *opaque, void *unused)
vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem);
}

long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
uint64_t data, int count, int fd)
{
struct pci_dev *pdev = vdev->pdev;

@ -14,7 +14,7 @@
#include <asm/pci_clp.h>
#include <asm/pci_io.h>

#include "vfio_pci_private.h"
#include <linux/vfio_pci_core.h>

/*
* Add the Base PCI Function information to the device info region.
@ -109,7 +109,7 @@ static int zpci_pfip_cap(struct zpci_dev *zdev, struct vfio_info_cap *caps)
/*
* Add all supported capabilities to the VFIO_DEVICE_GET_INFO capability chain.
*/
int vfio_pci_info_zdev_add_caps(struct vfio_pci_device *vdev,
int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
struct vfio_info_cap *caps)
{
struct zpci_dev *zdev = to_zpci(vdev->pdev);

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config VFIO_PLATFORM
tristate "VFIO support for platform devices"
depends on VFIO && EVENTFD && (ARM || ARM64 || COMPILE_TEST)
depends on ARM || ARM64 || COMPILE_TEST
select VFIO_VIRQFD
help
Support for platform devices with VFIO. This is required to make
@ -10,9 +10,10 @@ config VFIO_PLATFORM

If you don't know what to do here, say N.

if VFIO_PLATFORM
config VFIO_AMBA
tristate "VFIO support for AMBA devices"
depends on VFIO_PLATFORM && (ARM_AMBA || COMPILE_TEST)
depends on ARM_AMBA || COMPILE_TEST
help
Support for ARM AMBA devices with VFIO. This is required to make
use of ARM AMBA devices present on the system using the VFIO
@ -21,3 +22,4 @@ config VFIO_AMBA
If you don't know what to do here, say N.

source "drivers/vfio/platform/reset/Kconfig"
endif

@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config VFIO_PLATFORM_CALXEDAXGMAC_RESET
tristate "VFIO support for calxeda xgmac reset"
depends on VFIO_PLATFORM
help
Enables the VFIO platform driver to handle reset for Calxeda xgmac

@ -9,7 +8,6 @@ config VFIO_PLATFORM_CALXEDAXGMAC_RESET

config VFIO_PLATFORM_AMDXGBE_RESET
tristate "VFIO support for AMD XGBE reset"
depends on VFIO_PLATFORM
help
Enables the VFIO platform driver to handle reset for AMD XGBE

@ -17,7 +15,7 @@ config VFIO_PLATFORM_AMDXGBE_RESET

config VFIO_PLATFORM_BCMFLEXRM_RESET
tristate "VFIO support for Broadcom FlexRM reset"
depends on VFIO_PLATFORM && (ARCH_BCM_IPROC || COMPILE_TEST)
depends on ARCH_BCM_IPROC || COMPILE_TEST
default ARCH_BCM_IPROC
help
Enables the VFIO platform driver to handle reset for Broadcom FlexRM

@ -612,17 +612,17 @@ static int vfio_wait(struct vfio_iommu *iommu)
static int vfio_find_dma_valid(struct vfio_iommu *iommu, dma_addr_t start,
size_t size, struct vfio_dma **dma_p)
{
int ret;
int ret = 0;

do {
*dma_p = vfio_find_dma(iommu, start, size);
if (!*dma_p)
ret = -EINVAL;
return -EINVAL;
else if (!(*dma_p)->vaddr_invalid)
ret = 0;
return ret;
else
ret = vfio_wait(iommu);
} while (ret > 0);
} while (ret == WAITED);

return ret;
}
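
The reworked loop above returns as soon as the mapping is found, and only repeats while the wait path reports the WAITED sentinel; on a successful lookup it hands back 0 if it never slept, or WAITED if it did, so the caller can tell that the lock may have been dropped along the way. A small standalone illustration of that sentinel-retry idiom follows (plain C with a stub standing in for the real wait; nothing here is kernel code, and the stub's behaviour is an assumption made only for the demo):

#include <stdio.h>

#define WAITED 1	/* sentinel: "the lock was dropped and we slept" */

static int invalid_lookups = 2;	/* pretend the mapping is invalid twice */

/* Stub for the wait step: always sleeps successfully in this toy model. */
static int stub_wait(void)
{
	invalid_lookups--;
	return WAITED;
}

/* Mirrors the control flow of vfio_find_dma_valid() above. */
static int find_valid(void)
{
	int ret = 0;

	do {
		if (invalid_lookups <= 0)
			return ret;	/* found and valid: 0, or WAITED if we slept */
		ret = stub_wait();	/* still invalid: wait and look again */
	} while (ret == WAITED);

	return ret;
}

int main(void)
{
	printf("find_valid() = %d\n", find_valid());	/* prints 1, i.e. WAITED */
	return 0;
}
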
@ -16,6 +16,10 @@ typedef unsigned long kernel_ulong_t;

#define PCI_ANY_ID (~0)

enum {
PCI_ID_F_VFIO_DRIVER_OVERRIDE = 1,
};

/**
* struct pci_device_id - PCI device ID structure
* @vendor: Vendor ID to match (or PCI_ANY_ID)
@ -34,12 +38,14 @@ typedef unsigned long kernel_ulong_t;
* Best practice is to use driver_data as an index
* into a static list of equivalent device types,
* instead of using it as a pointer.
* @override_only: Match only when dev->driver_override is this driver.
*/
struct pci_device_id {
__u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
__u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
__u32 class, class_mask; /* (class,subclass,prog-if) triplet */
kernel_ulong_t driver_data; /* Data private to the driver */
__u32 override_only;
};

@ -901,6 +901,35 @@ struct pci_driver {
.vendor = (vend), .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID

/**
* PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with
* override_only flags.
* @vend: the 16 bit PCI Vendor ID
* @dev: the 16 bit PCI Device ID
* @driver_override: the 32 bit PCI Device override_only
*
* This macro is used to create a struct pci_device_id that matches only a
* driver_override device. The subvendor and subdevice fields will be set to
* PCI_ANY_ID.
*/
#define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
.vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \
.subdevice = PCI_ANY_ID, .override_only = (driver_override)

/**
* PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO
* "driver_override" PCI device.
* @vend: the 16 bit PCI Vendor ID
* @dev: the 16 bit PCI Device ID
*
* This macro is used to create a struct pci_device_id that matches a
* specific device. The subvendor and subdevice fields will be set to
* PCI_ANY_ID and the driver_override will be set to
* PCI_ID_F_VFIO_DRIVER_OVERRIDE.
*/
#define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \
PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE)

/**
* PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem
* @vend: the 16 bit PCI Vendor ID
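
PCI_DRIVER_OVERRIDE_DEVICE_VFIO() is what a VFIO PCI driver would place in its ID table so it matches a device only after userspace has pointed that device's driver_override at it. A usage sketch, with hypothetical vendor/device IDs and a hypothetical table name:

/* Hypothetical example: bind to abcd:1234 only via driver_override. */
static const struct pci_device_id my_vfio_pci_table[] = {
	{ PCI_DRIVER_OVERRIDE_DEVICE_VFIO(0xabcd, 0x1234) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, my_vfio_pci_table);

The override itself is the long-standing sysfs flow: write the driver name into the device's driver_override attribute and trigger a (re)bind; the macro only ensures the ID table never matches without that step.
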
@ -10,13 +10,14 @@

#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/vfio.h>
#include <linux/irqbypass.h>
#include <linux/types.h>
#include <linux/uuid.h>
#include <linux/notifier.h>

#ifndef VFIO_PCI_PRIVATE_H
#define VFIO_PCI_PRIVATE_H
#ifndef VFIO_PCI_CORE_H
#define VFIO_PCI_CORE_H

#define VFIO_PCI_OFFSET_SHIFT 40

@ -33,7 +34,7 @@

struct vfio_pci_ioeventfd {
struct list_head next;
struct vfio_pci_device *vdev;
struct vfio_pci_core_device *vdev;
struct virqfd *virqfd;
void __iomem *addr;
uint64_t data;
@ -52,18 +53,18 @@ struct vfio_pci_irq_ctx {
struct irq_bypass_producer producer;
};

struct vfio_pci_device;
struct vfio_pci_core_device;
struct vfio_pci_region;

struct vfio_pci_regops {
ssize_t (*rw)(struct vfio_pci_device *vdev, char __user *buf,
ssize_t (*rw)(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite);
void (*release)(struct vfio_pci_device *vdev,
void (*release)(struct vfio_pci_core_device *vdev,
struct vfio_pci_region *region);
int (*mmap)(struct vfio_pci_device *vdev,
int (*mmap)(struct vfio_pci_core_device *vdev,
struct vfio_pci_region *region,
struct vm_area_struct *vma);
int (*add_capability)(struct vfio_pci_device *vdev,
int (*add_capability)(struct vfio_pci_core_device *vdev,
struct vfio_pci_region *region,
struct vfio_info_cap *caps);
};
@ -94,7 +95,7 @@ struct vfio_pci_mmap_vma {
struct list_head vma_next;
};

struct vfio_pci_device {
struct vfio_pci_core_device {
struct vfio_device vdev;
struct pci_dev *pdev;
void __iomem *barmap[PCI_STD_NUM_BARS];
@ -144,65 +145,95 @@ struct vfio_pci_device {
#define is_irq_none(vdev) (!(is_intx(vdev) || is_msi(vdev) || is_msix(vdev)))
#define irq_is(vdev, type) (vdev->irq_type == type)

extern void vfio_pci_intx_mask(struct vfio_pci_device *vdev);
extern void vfio_pci_intx_unmask(struct vfio_pci_device *vdev);
extern void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev);
extern void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev);

extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev,
extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev,
uint32_t flags, unsigned index,
unsigned start, unsigned count, void *data);

extern ssize_t vfio_pci_config_rw(struct vfio_pci_device *vdev,
extern ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev,
char __user *buf, size_t count,
loff_t *ppos, bool iswrite);

extern ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
extern ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite);

extern ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
extern ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
size_t count, loff_t *ppos, bool iswrite);

extern long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
extern long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
uint64_t data, int count, int fd);

extern int vfio_pci_init_perm_bits(void);
extern void vfio_pci_uninit_perm_bits(void);

extern int vfio_config_init(struct vfio_pci_device *vdev);
extern void vfio_config_free(struct vfio_pci_device *vdev);
extern int vfio_config_init(struct vfio_pci_core_device *vdev);
extern void vfio_config_free(struct vfio_pci_core_device *vdev);

extern int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
extern int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev,
unsigned int type, unsigned int subtype,
const struct vfio_pci_regops *ops,
size_t size, u32 flags, void *data);

extern int vfio_pci_set_power_state(struct vfio_pci_device *vdev,
extern int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev,
pci_power_t state);

extern bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev);
extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_device
extern bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev);
extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device
*vdev);
extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_device *vdev);
extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_device *vdev,
extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev);
extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev,
u16 cmd);

#ifdef CONFIG_VFIO_PCI_IGD
extern int vfio_pci_igd_init(struct vfio_pci_device *vdev);
extern int vfio_pci_igd_init(struct vfio_pci_core_device *vdev);
#else
static inline int vfio_pci_igd_init(struct vfio_pci_device *vdev)
static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
{
return -ENODEV;
}
#endif

#ifdef CONFIG_S390
extern int vfio_pci_info_zdev_add_caps(struct vfio_pci_device *vdev,
extern int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
struct vfio_info_cap *caps);
#else
static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_device *vdev,
static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
struct vfio_info_cap *caps)
{
return -ENODEV;
}
#endif

#endif /* VFIO_PCI_PRIVATE_H */
/* Will be exported for vfio pci drivers usage */
void vfio_pci_core_set_params(bool nointxmask, bool is_disable_vga,
bool is_disable_idle_d3);
void vfio_pci_core_close_device(struct vfio_device *core_vdev);
void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
struct pci_dev *pdev,
const struct vfio_device_ops *vfio_pci_ops);
int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev);
void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev);
void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev);
int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn);
extern const struct pci_error_handlers vfio_pci_core_err_handlers;
long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
unsigned long arg);
ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
size_t count, loff_t *ppos);
ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
size_t count, loff_t *ppos);
int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma);
void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count);
int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);

static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}

#endif /* VFIO_PCI_CORE_H */
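
The block of vfio_pci_core_* declarations above is the surface a vfio-pci variant driver is expected to build on: allocate a vfio_pci_core_device (or a structure embedding one), initialise it against the pci_dev, then register it. A rough sketch of the probe/remove pairing follows; my_vfio_pci_probe, my_vfio_pci_remove and my_vfio_pci_ops are hypothetical names, and the reflck and SR-IOV handling done by the real drivers is deliberately left out:

static int my_vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_core_device *vdev;
	int ret;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return -ENOMEM;

	/* Tie the core state to this pci_dev and to our vfio_device_ops. */
	vfio_pci_core_init_device(vdev, pdev, &my_vfio_pci_ops);

	ret = vfio_pci_core_register_device(vdev);
	if (ret)
		goto out_uninit;

	pci_set_drvdata(pdev, vdev);
	return 0;

out_uninit:
	vfio_pci_core_uninit_device(vdev);
	kfree(vdev);
	return ret;
}

static void my_vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_core_device *vdev = pci_get_drvdata(pdev);

	vfio_pci_core_unregister_device(vdev);
	vfio_pci_core_uninit_device(vdev);
	kfree(vdev);
}

The vfio_device_ops table would then typically wire read/write/ioctl/mmap/request/match straight to the exported vfio_pci_core_* helpers, with the open/close paths calling vfio_pci_core_enable(), vfio_pci_core_finish_enable() and vfio_pci_core_disable().
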
@ -42,6 +42,7 @@ int main(void)
DEVID_FIELD(pci_device_id, subdevice);
DEVID_FIELD(pci_device_id, class);
DEVID_FIELD(pci_device_id, class_mask);
DEVID_FIELD(pci_device_id, override_only);

DEVID(ccw_device_id);
DEVID_FIELD(ccw_device_id, match_flags);

@ -426,7 +426,7 @@ static int do_ieee1394_entry(const char *filename,
return 1;
}

/* Looks like: pci:vNdNsvNsdNbcNscNiN. */
/* Looks like: pci:vNdNsvNsdNbcNscNiN or <prefix>_pci:vNdNsvNsdNbcNscNiN. */
static int do_pci_entry(const char *filename,
void *symval, char *alias)
{
@ -440,8 +440,21 @@ static int do_pci_entry(const char *filename,
DEF_FIELD(symval, pci_device_id, subdevice);
DEF_FIELD(symval, pci_device_id, class);
DEF_FIELD(symval, pci_device_id, class_mask);
DEF_FIELD(symval, pci_device_id, override_only);

switch (override_only) {
case 0:
strcpy(alias, "pci:");
break;
case PCI_ID_F_VFIO_DRIVER_OVERRIDE:
strcpy(alias, "vfio_pci:");
break;
default:
warn("Unknown PCI driver_override alias %08X\n",
override_only);
return 0;
}

strcpy(alias, "pci:");
ADD(alias, "v", vendor != PCI_ANY_ID, vendor);
ADD(alias, "d", device != PCI_ANY_ID, device);
ADD(alias, "sv", subvendor != PCI_ANY_ID, subvendor);
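
With the switch above, an override_only entry no longer lands in the generic pci: alias namespace, so ordinary hotplug/udev matching will not autoload a module that only wants driver_override binds. As a worked illustration (the abcd:1234 IDs are hypothetical, and the wildcards follow from the PCI_ANY_ID and zero class defaults), the generated module aliases would look roughly like:

plain pci_device_id entry:               pci:v0000ABCDd00001234sv*sd*bc*sc*i*
PCI_DRIVER_OVERRIDE_DEVICE_VFIO() entry: vfio_pci:v0000ABCDd00001234sv*sd*bc*sc*i*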