iommu/amd: Declare functions as extern

Also move the declarations to the header file so that they can be shared
across multiple files. There is no functional change.
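
As a minimal illustration of the pattern (the file and function names below
are hypothetical and not taken from this patch): the definition drops its
static qualifier, and an extern declaration is added to a shared header so
that other compilation units in the driver can call the function.

/* foo.h -- hypothetical header shared by the driver's .c files */
#ifndef FOO_H
#define FOO_H

struct foo_domain;	/* opaque to callers */

/* Declaration visible to every file that includes foo.h */
extern void foo_domain_flush(struct foo_domain *dom);

#endif /* FOO_H */

/* foo.c -- the definition is no longer static, so the symbol is
 * visible to the other compilation units that include foo.h */
#include "foo.h"

void foo_domain_flush(struct foo_domain *dom)
{
	/* issue the flush for @dom here */
}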

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20201215073705.123786-6-suravee.suthikulpanit@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit f9b4df790a (parent 1f58553066)
Suravee Suthikulpanit, 2020-12-15 01:36:57 -06:00; committed by Joerg Roedel
2 changed files with 22 additions and 20 deletions

drivers/iommu/amd/amd_iommu.h

@@ -57,6 +57,9 @@ extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
 extern int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
                                u64 address);
 extern void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
+extern void amd_iommu_domain_update(struct protection_domain *domain);
+extern void amd_iommu_domain_flush_complete(struct protection_domain *domain);
+extern void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain);
 extern int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
 extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
                                     unsigned long cr3);

drivers/iommu/amd/iommu.c

@@ -86,7 +86,6 @@ struct iommu_cmd {
 
 struct kmem_cache *amd_iommu_irq_cache;
 
-static void update_domain(struct protection_domain *domain);
 static void detach_device(struct device *dev);
 
 /****************************************************************************
@@ -1313,12 +1312,12 @@ static void domain_flush_pages(struct protection_domain *domain,
 }
 
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
-static void domain_flush_tlb_pde(struct protection_domain *domain)
+void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain)
 {
        __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
 }
 
-static void domain_flush_complete(struct protection_domain *domain)
+void amd_iommu_domain_flush_complete(struct protection_domain *domain)
 {
        int i;
 
@@ -1343,7 +1342,7 @@ static void domain_flush_np_cache(struct protection_domain *domain,
 
                spin_lock_irqsave(&domain->lock, flags);
                domain_flush_pages(domain, iova, size);
-               domain_flush_complete(domain);
+               amd_iommu_domain_flush_complete(domain);
                spin_unlock_irqrestore(&domain->lock, flags);
        }
 }
@@ -1500,7 +1499,7 @@ static bool increase_address_space(struct protection_domain *domain,
        pgtable.root = pte;
        pgtable.mode += 1;
        amd_iommu_update_and_flush_device_table(domain);
-       domain_flush_complete(domain);
+       amd_iommu_domain_flush_complete(domain);
 
        /*
         * Device Table needs to be updated and flushed before the new root can
@@ -1753,8 +1752,8 @@ out:
         * Updates and flushing already happened in
         * increase_address_space().
         */
-       domain_flush_tlb_pde(dom);
-       domain_flush_complete(dom);
+       amd_iommu_domain_flush_tlb_pde(dom);
+       amd_iommu_domain_flush_complete(dom);
        spin_unlock_irqrestore(&dom->lock, flags);
 }
 
@@ -1997,10 +1996,10 @@ static void do_detach(struct iommu_dev_data *dev_data)
        device_flush_dte(dev_data);
 
        /* Flush IOTLB */
-       domain_flush_tlb_pde(domain);
+       amd_iommu_domain_flush_tlb_pde(domain);
 
        /* Wait for the flushes to finish */
-       domain_flush_complete(domain);
+       amd_iommu_domain_flush_complete(domain);
 
        /* decrease reference counters - needs to happen after the flushes */
        domain->dev_iommu[iommu->index] -= 1;
@@ -2133,9 +2132,9 @@ skip_ats_check:
         * left the caches in the IOMMU dirty. So we have to flush
         * here to evict all dirty stuff.
         */
-       domain_flush_tlb_pde(domain);
+       amd_iommu_domain_flush_tlb_pde(domain);
 
-       domain_flush_complete(domain);
+       amd_iommu_domain_flush_complete(domain);
 
 out:
        spin_unlock(&dev_data->lock);
@@ -2297,7 +2296,7 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
        domain_flush_devices(domain);
 }
 
-static void update_domain(struct protection_domain *domain)
+void amd_iommu_domain_update(struct protection_domain *domain)
 {
        struct domain_pgtable pgtable;
 
@@ -2306,8 +2305,8 @@ static void update_domain(struct protection_domain *domain)
        amd_iommu_update_and_flush_device_table(domain);
 
        /* Flush domain TLB(s) and wait for completion */
-       domain_flush_tlb_pde(domain);
-       domain_flush_complete(domain);
+       amd_iommu_domain_flush_tlb_pde(domain);
+       amd_iommu_domain_flush_complete(domain);
 }
 
 int __init amd_iommu_init_api(void)
@@ -2695,8 +2694,8 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
        unsigned long flags;
 
        spin_lock_irqsave(&dom->lock, flags);
-       domain_flush_tlb_pde(dom);
-       domain_flush_complete(dom);
+       amd_iommu_domain_flush_tlb_pde(dom);
+       amd_iommu_domain_flush_complete(dom);
        spin_unlock_irqrestore(&dom->lock, flags);
 }
 
@@ -2786,7 +2785,7 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
        amd_iommu_domain_clr_pt_root(domain);
 
        /* Make changes visible to IOMMUs */
-       update_domain(domain);
+       amd_iommu_domain_update(domain);
 
        /* Page-table is not visible to IOMMU anymore, so free it */
        free_pagetable(&pgtable);
@@ -2830,7 +2829,7 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
        domain->glx = levels;
        domain->flags |= PD_IOMMUV2_MASK;
 
-       update_domain(domain);
+       amd_iommu_domain_update(domain);
 
        ret = 0;
 
@@ -2867,7 +2866,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
        }
 
        /* Wait until IOMMU TLB flushes are complete */
-       domain_flush_complete(domain);
+       amd_iommu_domain_flush_complete(domain);
 
        /* Now flush device TLBs */
        list_for_each_entry(dev_data, &domain->dev_list, list) {
@@ -2893,7 +2892,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
        }
 
        /* Wait until all device TLBs are flushed */
-       domain_flush_complete(domain);
+       amd_iommu_domain_flush_complete(domain);
 
        ret = 0;
 