vfio: Remove VFIO_TYPE1_NESTING_IOMMU
This control causes the ARM SMMU drivers to choose a stage 2
implementation for the IO pagetable (vs the usual stage 1 default);
however, this choice has no significant visible impact on the VFIO
user. Further, qemu never implemented it and no other userspace user is
known.
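
For context, the only way userspace could ever reach this path was the
legacy type1 container flow. Below is a minimal sketch of that flow; it
assumes a container fd that already has a group attached via
VFIO_GROUP_SET_CONTAINER, the helper name is made up, and it only builds
against uapi headers that predate this change:

  #include <sys/ioctl.h>
  #include <linux/vfio.h>

  /* Sketch of the legacy flow this patch obsoletes; error handling omitted. */
  static int request_nesting(int container)
  {
      /* Previously advertised; after this patch the check reports no support */
      if (ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_NESTING_IOMMU) <= 0)
          return -1;

      /* Selecting this type only flipped the SMMU IO pagetable to stage 2 */
      return ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_NESTING_IOMMU);
  }
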
The original description in commit f5c9ecebaf ("vfio/iommu_type1: add
new VFIO_TYPE1_NESTING_IOMMU IOMMU type") suggested this was to "provide
SMMU translation services to the guest operating system"; however, the
rest of the API to set the guest table pointer for stage 1 and manage
invalidation was never completed, or at least never upstreamed, leaving
this part as useless dead code.

Upstream has now settled on iommufd as the uAPI for controlling nested
translation. Choosing the stage 2 implementation should be done through
the IOMMU_HWPT_ALLOC_NEST_PARENT flag during domain allocation.
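
For comparison, a rough sketch of requesting the equivalent stage 2
"nesting parent" domain through iommufd. It assumes a device id from the
VFIO cdev bind path and an existing IOAS id; the helper name is made up
and the rest of the setup is elided:

  #include <sys/ioctl.h>
  #include <linux/types.h>
  #include <linux/iommufd.h>

  /* Sketch only: allocate a stage 2 HWPT that can later parent a stage 1. */
  static int alloc_nest_parent(int iommufd, __u32 dev_id, __u32 ioas_id)
  {
      struct iommu_hwpt_alloc cmd = {
          .size = sizeof(cmd),
          .flags = IOMMU_HWPT_ALLOC_NEST_PARENT,
          .dev_id = dev_id,
          .pt_id = ioas_id,
      };

      if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd) < 0)
          return -1;
      return cmd.out_hwpt_id; /* attach this, then allocate the nested stage 1 on top */
  }
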
Remove VFIO_TYPE1_NESTING_IOMMU and everything under it, including the
enable_nesting iommu_domain_op.

Just in case there is some userspace using this, continue to treat
requesting it as a NOP, but do not advertise support any more.

Acked-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: Mostafa Saleh <smostafa@google.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Reviewed-by: Donald Dutile <ddutile@redhat.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/1-v4-9e99b76f3518+3a8-smmuv3_nesting_jgg@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 8e929cb546
commit 35890f8557
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3378,21 +3378,6 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
 	return group;
 }
 
-static int arm_smmu_enable_nesting(struct iommu_domain *domain)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	int ret = 0;
-
-	mutex_lock(&smmu_domain->init_mutex);
-	if (smmu_domain->smmu)
-		ret = -EPERM;
-	else
-		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
-	mutex_unlock(&smmu_domain->init_mutex);
-
-	return ret;
-}
-
 static int arm_smmu_of_xlate(struct device *dev,
 			     const struct of_phandle_args *args)
 {
@@ -3514,7 +3499,6 @@ static struct iommu_ops arm_smmu_ops = {
 		.flush_iotlb_all = arm_smmu_flush_iotlb_all,
 		.iotlb_sync = arm_smmu_iotlb_sync,
 		.iova_to_phys = arm_smmu_iova_to_phys,
-		.enable_nesting = arm_smmu_enable_nesting,
 		.free = arm_smmu_domain_free_paging,
 	}
 };
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1558,21 +1558,6 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
 	return group;
 }
 
-static int arm_smmu_enable_nesting(struct iommu_domain *domain)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	int ret = 0;
-
-	mutex_lock(&smmu_domain->init_mutex);
-	if (smmu_domain->smmu)
-		ret = -EPERM;
-	else
-		smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
-	mutex_unlock(&smmu_domain->init_mutex);
-
-	return ret;
-}
-
 static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain,
 				       unsigned long quirks)
 {
@@ -1656,7 +1641,6 @@ static struct iommu_ops arm_smmu_ops = {
 		.flush_iotlb_all = arm_smmu_flush_iotlb_all,
 		.iotlb_sync = arm_smmu_iotlb_sync,
 		.iova_to_phys = arm_smmu_iova_to_phys,
-		.enable_nesting = arm_smmu_enable_nesting,
 		.set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
 		.free = arm_smmu_domain_free,
 	}
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2723,16 +2723,6 @@ static int __init iommu_init(void)
 }
 core_initcall(iommu_init);
 
-int iommu_enable_nesting(struct iommu_domain *domain)
-{
-	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
-		return -EINVAL;
-	if (!domain->ops->enable_nesting)
-		return -EINVAL;
-	return domain->ops->enable_nesting(domain);
-}
-EXPORT_SYMBOL_GPL(iommu_enable_nesting);
-
 int iommu_set_pgtable_quirks(struct iommu_domain *domain,
 			     unsigned long quirk)
 {
--- a/drivers/iommu/iommufd/vfio_compat.c
+++ b/drivers/iommu/iommufd/vfio_compat.c
@@ -291,12 +291,7 @@ static int iommufd_vfio_check_extension(struct iommufd_ctx *ictx,
 	case VFIO_DMA_CC_IOMMU:
 		return iommufd_vfio_cc_iommu(ictx);
 
-	/*
-	 * This is obsolete, and to be removed from VFIO. It was an incomplete
-	 * idea that got merged.
-	 * https://lore.kernel.org/kvm/0-v1-0093c9b0e345+19-vfio_no_nesting_jgg@nvidia.com/
-	 */
-	case VFIO_TYPE1_NESTING_IOMMU:
+	case __VFIO_RESERVED_TYPE1_NESTING_IOMMU:
 		return 0;
 
 	/*
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -72,7 +72,6 @@ struct vfio_iommu {
 	uint64_t pgsize_bitmap;
 	uint64_t num_non_pinned_groups;
 	bool v2;
-	bool nesting;
 	bool dirty_page_tracking;
 	struct list_head emulated_iommu_groups;
 };
@@ -2195,12 +2194,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 		goto out_free_domain;
 	}
 
-	if (iommu->nesting) {
-		ret = iommu_enable_nesting(domain->domain);
-		if (ret)
-			goto out_domain;
-	}
-
 	ret = iommu_attach_group(domain->domain, group->iommu_group);
 	if (ret)
 		goto out_domain;
@@ -2541,9 +2534,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 	switch (arg) {
 	case VFIO_TYPE1_IOMMU:
 		break;
-	case VFIO_TYPE1_NESTING_IOMMU:
-		iommu->nesting = true;
-		fallthrough;
+	case __VFIO_RESERVED_TYPE1_NESTING_IOMMU:
 	case VFIO_TYPE1v2_IOMMU:
 		iommu->v2 = true;
 		break;
@@ -2638,7 +2629,6 @@ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
 	switch (arg) {
 	case VFIO_TYPE1_IOMMU:
 	case VFIO_TYPE1v2_IOMMU:
-	case VFIO_TYPE1_NESTING_IOMMU:
 	case VFIO_UNMAP_ALL:
 		return 1;
 	case VFIO_UPDATE_VADDR:
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -635,7 +635,6 @@ struct iommu_ops {
  * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
  *                           including no-snoop TLPs on PCIe or other platform
  *                           specific mechanisms.
- * @enable_nesting: Enable nesting
  * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
  * @free: Release the domain after use.
  */
@@ -663,7 +662,6 @@ struct iommu_domain_ops {
 			    dma_addr_t iova);
 
 	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
-	int (*enable_nesting)(struct iommu_domain *domain);
 	int (*set_pgtable_quirks)(struct iommu_domain *domain,
 				  unsigned long quirks);
 
@@ -844,7 +842,6 @@ extern void iommu_group_put(struct iommu_group *group);
 extern int iommu_group_id(struct iommu_group *group);
 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
 
-int iommu_enable_nesting(struct iommu_domain *domain);
 int iommu_set_pgtable_quirks(struct iommu_domain *domain,
 		unsigned long quirks);
 
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -35,7 +35,7 @@
 #define VFIO_EEH 5
 
 /* Two-stage IOMMU */
-#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
+#define __VFIO_RESERVED_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
 
 #define VFIO_SPAPR_TCE_v2_IOMMU 7
 