iommu/arm-smmu-v3: Split arm_smmu_tlb_inv_range()

Extract some of the cmd initialization and the ATC invalidation from
arm_smmu_tlb_inv_range(), to allow an MMU notifier to invalidate a VA
range by ASID.

Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Link: https://lore.kernel.org/r/20210122151054.2833521-2-jean-philippe@linaro.org
Signed-off-by: Will Deacon <will@kernel.org>
commit eba8d2f8f8 (parent 932bc8c7d7)
Author: Jean-Philippe Brucker <jean-philippe@linaro.org>
Date:   2021-01-22 16:10:54 +01:00
Committer: Will Deacon <will@kernel.org>
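
For context on what the split enables before reading the diff: once the TLBI command is built by the caller, an ASID-scoped invalidation (the MMU notifier case mentioned in the commit message) no longer needs to derive the ASID or stage from the domain. A minimal sketch of the kind of wrapper this makes possible, assuming a helper named arm_smmu_tlb_inv_range_asid(); the name and exact signature are illustrative of follow-up SVA work, not part of this patch:

/*
 * Illustrative sketch only (not part of this patch): an ASID-scoped
 * variant built on the split-out __arm_smmu_tlb_inv_range(). The caller
 * supplies the ASID directly instead of reading it from the domain's
 * stage-1 configuration.
 */
static void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size,
                                        int asid, size_t granule, bool leaf,
                                        struct arm_smmu_domain *smmu_domain)
{
        struct arm_smmu_cmdq_ent cmd = {
                .opcode = CMDQ_OP_TLBI_NH_VA,
                .tlbi = {
                        .asid = asid,
                        .leaf = leaf,
                },
        };

        __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
}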


--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1658,40 +1658,28 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 	arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
 }
 
-static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
-				   size_t granule, bool leaf,
-				   struct arm_smmu_domain *smmu_domain)
+static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
+				     unsigned long iova, size_t size,
+				     size_t granule,
+				     struct arm_smmu_domain *smmu_domain)
 {
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	unsigned long start = iova, end = iova + size, num_pages = 0, tg = 0;
+	unsigned long end = iova + size, num_pages = 0, tg = 0;
 	size_t inv_range = granule;
 	struct arm_smmu_cmdq_batch cmds = {};
-	struct arm_smmu_cmdq_ent cmd = {
-		.tlbi = {
-			.leaf = leaf,
-		},
-	};
 
 	if (!size)
 		return;
 
-	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-		cmd.opcode = CMDQ_OP_TLBI_NH_VA;
-		cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
-	} else {
-		cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
-		cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
-	}
-
 	if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
 		/* Get the leaf page size */
 		tg = __ffs(smmu_domain->domain.pgsize_bitmap);
 
 		/* Convert page size of 12,14,16 (log2) to 1,2,3 */
-		cmd.tlbi.tg = (tg - 10) / 2;
+		cmd->tlbi.tg = (tg - 10) / 2;
 
 		/* Determine what level the granule is at */
-		cmd.tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+		cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
 
 		num_pages = size >> tg;
 	}
@@ -1709,11 +1697,11 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
 
 			/* Determine the power of 2 multiple number of pages */
 			scale = __ffs(num_pages);
-			cmd.tlbi.scale = scale;
+			cmd->tlbi.scale = scale;
 
 			/* Determine how many chunks of 2^scale size we have */
 			num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
-			cmd.tlbi.num = num - 1;
+			cmd->tlbi.num = num - 1;
 
 			/* range is num * 2^scale * pgsize */
 			inv_range = num << (scale + tg);
@@ -1722,17 +1710,37 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
 			num_pages -= num << scale;
 		}
 
-		cmd.tlbi.addr = iova;
-		arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
+		cmd->tlbi.addr = iova;
+		arm_smmu_cmdq_batch_add(smmu, &cmds, cmd);
 		iova += inv_range;
 	}
 	arm_smmu_cmdq_batch_submit(smmu, &cmds);
+}
+
+static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
+					  size_t granule, bool leaf,
+					  struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_cmdq_ent cmd = {
+		.tlbi = {
+			.leaf = leaf,
+		},
+	};
+
+	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+		cmd.opcode = CMDQ_OP_TLBI_NH_VA;
+		cmd.tlbi.asid = smmu_domain->s1_cfg.cd.asid;
+	} else {
+		cmd.opcode = CMDQ_OP_TLBI_S2_IPA;
+		cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+	}
+	__arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
 
 	/*
 	 * Unfortunately, this can't be leaf-only since we may have
 	 * zapped an entire table.
 	 */
-	arm_smmu_atc_inv_domain(smmu_domain, 0, start, size);
+	arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size);
 }
 
 static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
@@ -1748,7 +1756,7 @@ static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
 static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
 				  size_t granule, void *cookie)
 {
-	arm_smmu_tlb_inv_range(iova, size, granule, false, cookie);
+	arm_smmu_tlb_inv_range_domain(iova, size, granule, false, cookie);
 }
 
 static const struct iommu_flush_ops arm_smmu_flush_ops = {
@@ -2271,8 +2279,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-	arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start,
-			       gather->pgsize, true, smmu_domain);
+	arm_smmu_tlb_inv_range_domain(gather->start,
+				      gather->end - gather->start,
+				      gather->pgsize, true, smmu_domain);
 }
 
 static phys_addr_t
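
To show where this lands, here is a hedged sketch of the kind of MMU notifier invalidate_range() callback the commit message is preparing for. Everything beyond __arm_smmu_tlb_inv_range(), arm_smmu_atc_inv_domain() and the mmu_notifier callback signature is an assumption about follow-up SVA work, not something defined by this patch: struct arm_smmu_mmu_notifier, mn_to_smmu() and the arm_smmu_tlb_inv_range_asid() helper sketched above are illustrative names.

/* Hypothetical SVA notifier state; not defined by this patch. */
struct arm_smmu_mmu_notifier {
        struct mmu_notifier             mn;
        struct arm_smmu_ctx_desc        *cd;            /* provides the ASID */
        struct arm_smmu_domain          *domain;
};

static struct arm_smmu_mmu_notifier *mn_to_smmu(struct mmu_notifier *mn)
{
        return container_of(mn, struct arm_smmu_mmu_notifier, mn);
}

/* Sketch of an mmu_notifier invalidate_range callback using the split. */
static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
                                         struct mm_struct *mm,
                                         unsigned long start, unsigned long end)
{
        struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
        size_t size = end - start;

        /* TLB invalidation by ASID, independent of the domain's S1 config */
        arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
                                    PAGE_SIZE, false, smmu_mn->domain);
        /* The ATC still needs invalidating for this address space */
        arm_smmu_atc_inv_domain(smmu_mn->domain, mm->pasid, start, size);
}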