diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 65b8e4fd8217..07c47a7088be 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2174,13 +2174,13 @@ static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
 	struct protection_domain *domain = to_pdomain(dom);
 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
 
-	if (ops->map)
+	if (ops->map_pages)
 		domain_flush_np_cache(domain, iova, size);
 }
 
-static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-			 phys_addr_t paddr, size_t page_size, int iommu_prot,
-			 gfp_t gfp)
+static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
+			       phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			       int iommu_prot, gfp_t gfp, size_t *mapped)
 {
 	struct protection_domain *domain = to_pdomain(dom);
 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@@ -2196,8 +2196,10 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	if (iommu_prot & IOMMU_WRITE)
 		prot |= IOMMU_PROT_IW;
 
-	if (ops->map)
-		ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
+	if (ops->map_pages) {
+		ret = ops->map_pages(ops, iova, paddr, pgsize,
+				     pgcount, prot, gfp, mapped);
+	}
 
 	return ret;
 }
@@ -2223,9 +2225,9 @@ static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
 	iommu_iotlb_gather_add_range(gather, iova, size);
 }
 
-static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-			      size_t page_size,
-			      struct iommu_iotlb_gather *gather)
+static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
+				    size_t pgsize, size_t pgcount,
+				    struct iommu_iotlb_gather *gather)
 {
 	struct protection_domain *domain = to_pdomain(dom);
 	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
@@ -2235,9 +2237,10 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 	    (domain->iop.mode == PAGE_MODE_NONE))
 		return 0;
 
-	r = (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
+	r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;
 
-	amd_iommu_iotlb_gather_add_page(dom, gather, iova, page_size);
+	if (r)
+		amd_iommu_iotlb_gather_add_page(dom, gather, iova, r);
 
 	return r;
 }
@@ -2399,8 +2402,8 @@ const struct iommu_ops amd_iommu_ops = {
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev	= amd_iommu_attach_device,
 		.detach_dev	= amd_iommu_detach_device,
-		.map		= amd_iommu_map,
-		.unmap		= amd_iommu_unmap,
+		.map_pages	= amd_iommu_map_pages,
+		.unmap_pages	= amd_iommu_unmap_pages,
 		.iotlb_sync_map	= amd_iommu_iotlb_sync_map,
 		.iova_to_phys	= amd_iommu_iova_to_phys,
 		.flush_iotlb_all = amd_iommu_flush_iotlb_all,
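
For context only, not part of the patch: the sketch below illustrates, in simplified form, how a ->map_pages() callback such as the new amd_iommu_map_pages() is expected to be driven by a caller. The helper example_pick_pgsize() and the wrapper example_map_range() are hypothetical names invented for this illustration; the real IOMMU core derives the page size from domain->pgsize_bitmap and the alignment of iova/paddr, and handles partial progress via the *mapped out-parameter.

/*
 * Illustrative sketch only (not kernel core code): drive a ->map_pages()
 * callback over an arbitrarily sized range, one (pgsize, pgcount) batch
 * at a time, advancing by however many bytes were actually mapped.
 */
static int example_map_range(struct iommu_domain *dom,
			     const struct iommu_domain_ops *ops,
			     unsigned long iova, phys_addr_t paddr,
			     size_t size, int prot, gfp_t gfp)
{
	while (size) {
		/* Hypothetical helper: largest supported page size <= size. */
		size_t pgsize = example_pick_pgsize(dom, iova, paddr, size);
		size_t pgcount = size / pgsize;
		size_t mapped = 0;
		int ret;

		ret = ops->map_pages(dom, iova, paddr, pgsize, pgcount,
				     prot, gfp, &mapped);

		/* Advance by what was actually mapped, even on failure. */
		iova  += mapped;
		paddr += mapped;
		size  -= mapped;

		if (ret)
			return ret;	/* caller unmaps the partial range */
		if (!mapped)
			return -ENOMEM;	/* no forward progress; bail out */
	}

	return 0;
}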