iommu/msm: Update to {map,unmap}_pages

Update map/unmap to the new multi-page interfaces, which is dead easy
since we just pass them through to io-pgtable anyway.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/24a8f522710ddd6bbac4da154aa28799e939ebe4.1668100209.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
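
For reference, a sketch of the multi-page hooks this moves to, paraphrased from include/linux/iommu.h and include/linux/io-pgtable.h of that era (not a verbatim copy; exact member order and surrounding fields vary by kernel version). Both take a (pgsize, pgcount) pair describing a run of equal-sized pages, and the map side reports how much was actually mapped via *mapped:

	/* Paraphrased sketch, not verbatim kernel headers. */
	struct iommu_domain_ops {
		/* ... */
		int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
				 phys_addr_t paddr, size_t pgsize, size_t pgcount,
				 int prot, gfp_t gfp, size_t *mapped);
		size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
				      size_t pgsize, size_t pgcount,
				      struct iommu_iotlb_gather *iotlb_gather);
		/* ... */
	};

	struct io_pgtable_ops {
		int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
				 phys_addr_t paddr, size_t pgsize, size_t pgcount,
				 int prot, gfp_t gfp, size_t *mapped);
		size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
				      size_t pgsize, size_t pgcount,
				      struct iommu_iotlb_gather *gather);
		/* ... */
	};

Because the driver-facing and io-pgtable-facing signatures line up one-to-one, the driver callbacks below reduce to taking the page-table lock and forwarding the arguments.
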
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
@@ -471,14 +471,16 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
 }
 
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
-			 phys_addr_t pa, size_t len, int prot, gfp_t gfp)
+			 phys_addr_t pa, size_t pgsize, size_t pgcount,
+			 int prot, gfp_t gfp, size_t *mapped)
 {
 	struct msm_priv *priv = to_msm_priv(domain);
 	unsigned long flags;
 	int ret;
 
 	spin_lock_irqsave(&priv->pgtlock, flags);
-	ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
+	ret = priv->iop->map_pages(priv->iop, iova, pa, pgsize, pgcount, prot,
+				   GFP_ATOMIC, mapped);
 	spin_unlock_irqrestore(&priv->pgtlock, flags);
 
 	return ret;
@@ -493,16 +495,18 @@ static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
 }
 
 static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-			      size_t len, struct iommu_iotlb_gather *gather)
+			      size_t pgsize, size_t pgcount,
+			      struct iommu_iotlb_gather *gather)
 {
 	struct msm_priv *priv = to_msm_priv(domain);
 	unsigned long flags;
+	size_t ret;
 
 	spin_lock_irqsave(&priv->pgtlock, flags);
-	len = priv->iop->unmap(priv->iop, iova, len, gather);
+	ret = priv->iop->unmap_pages(priv->iop, iova, pgsize, pgcount, gather);
 	spin_unlock_irqrestore(&priv->pgtlock, flags);
 
-	return len;
+	return ret;
 }
 
 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -679,8 +683,8 @@ static struct iommu_ops msm_iommu_ops = {
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev = msm_iommu_attach_dev,
 		.detach_dev = msm_iommu_detach_dev,
-		.map = msm_iommu_map,
-		.unmap = msm_iommu_unmap,
+		.map_pages = msm_iommu_map,
+		.unmap_pages = msm_iommu_unmap,
 		/*
 		 * Nothing is needed here, the barrier to guarantee
 		 * completion of the tlb sync operation is implicitly
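
The practical win is batching: instead of the core invoking ->map() once per page, it can hand the driver a run of pgcount equal-sized pages in a single indirect call, so the pgtlock is taken once per batch rather than once per page. A hypothetical caller-side view (made-up IOVA and physical base; iommu_map() as it looked before a gfp_t parameter was later added to it):

	/* Hypothetical example: map sixteen 4K pages in one request. The
	 * IOMMU core splits the total size into (pgsize, pgcount) batches,
	 * which now reach msm_iommu_map() through the ->map_pages() hook.
	 */
	ret = iommu_map(domain, 0x10000000, phys_base,
			16 * SZ_4K, IOMMU_READ | IOMMU_WRITE);
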