mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
iommu/amd: Refactor amd_iommu_domain_enable_v2 to remove locking
The current function to enable IOMMU v2 also locks the domain. In order to reuse the same code in a different code path, where the domain has already been locked, refactor the function to separate the locking from the enabling logic.

Co-developed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20220825063939.8360-5-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
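For illustration, a minimal sketch of the calling pattern this split enables; domain_enable_v2(), pdom->lock and pdom->gcr3_tbl come from the patch below, while the caller itself is hypothetical:

	/* Hypothetical caller on a path that already holds the domain lock. */
	static int example_enable_v2_locked(struct protection_domain *pdom, int pasids)
	{
		lockdep_assert_held(&pdom->lock);	/* lock was taken earlier on this path */

		/* Set up the GCR3 table only once per domain. */
		if (pdom->gcr3_tbl)
			return 0;

		return domain_enable_v2(pdom, pasids);
	}

The exported amd_iommu_domain_enable_v2() below still takes pdom->lock itself, so existing external callers keep their previous behaviour.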
parent 6b080c4e81
commit 43312b710b
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -85,6 +85,7 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void detach_device(struct device *dev);
+static int domain_enable_v2(struct protection_domain *domain, int pasids);
 
 /****************************************************************************
  *
@@ -2450,11 +2451,10 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
 }
 EXPORT_SYMBOL(amd_iommu_domain_direct_map);
 
-int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+/* Note: This function expects iommu_domain->lock to be held prior calling the function. */
+static int domain_enable_v2(struct protection_domain *domain, int pasids)
 {
-	struct protection_domain *domain = to_pdomain(dom);
-	unsigned long flags;
-	int levels, ret;
+	int levels;
 
 	/* Number of GCR3 table levels required */
 	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
@@ -2463,7 +2463,25 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
 	if (levels > amd_iommu_max_glx_val)
 		return -EINVAL;
 
-	spin_lock_irqsave(&domain->lock, flags);
+	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
+	if (domain->gcr3_tbl == NULL)
+		return -ENOMEM;
+
+	domain->glx = levels;
+	domain->flags |= PD_IOMMUV2_MASK;
+
+	amd_iommu_domain_update(domain);
+
+	return 0;
+}
+
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+{
+	struct protection_domain *pdom = to_pdomain(dom);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&pdom->lock, flags);
 
 	/*
 	 * Save us all sanity checks whether devices already in the
@@ -2471,24 +2489,14 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
 	 * devices attached when it is switched into IOMMUv2 mode.
 	 */
 	ret = -EBUSY;
-	if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
+	if (pdom->dev_cnt > 0 || pdom->flags & PD_IOMMUV2_MASK)
 		goto out;
 
-	ret = -ENOMEM;
-	domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
-	if (domain->gcr3_tbl == NULL)
-		goto out;
-
-	domain->glx = levels;
-	domain->flags |= PD_IOMMUV2_MASK;
-
-	amd_iommu_domain_update(domain);
-
 	ret = 0;
+	if (!pdom->gcr3_tbl)
+		ret = domain_enable_v2(pdom, pasids);
 
 out:
-	spin_unlock_irqrestore(&domain->lock, flags);
-
+	spin_unlock_irqrestore(&pdom->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
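As a usage note, a sketch of how the exported entry point might be called after this change; amd_iommu_domain_enable_v2() and its error codes (-EBUSY when the domain already has devices attached or is already in v2 mode, -EINVAL when the PASID count requires more GCR3 levels than the IOMMU supports) come from the diff above, while the caller and the PASID count are illustrative:

	/* Illustrative only: request 16-bit PASID support on a freshly allocated domain. */
	static int example_setup_v2_domain(struct iommu_domain *dom)
	{
		int ret;

		/* Must run before any device is attached to the domain, otherwise -EBUSY. */
		ret = amd_iommu_domain_enable_v2(dom, 1 << 16);
		if (ret)
			pr_err("enabling IOMMUv2 mode failed: %d\n", ret);

		return ret;
	}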