mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-12-29 09:16:33 +00:00
IOMMU Updates for Linux v6.10
Including:

 - Core:
   - IOMMU memory usage observability - This will make the memory used
     for IO page tables explicitly visible.
   - Simplify arch_setup_dma_ops()

 - Intel VT-d:
   - Consolidate domain cache invalidation
   - Remove private data from page fault message
   - Allocate DMAR fault interrupts locally
   - Cleanup and refactoring

 - ARM-SMMUv2:
   - Support for fault debugging hardware on Qualcomm implementations
   - Re-land support for the ->domain_alloc_paging() callback

 - ARM-SMMUv3:
   - Improve handling of MSI allocation failure
   - Drop support for the "disable_bypass" cmdline option
   - Major rework of the CD creation code, following on directly from
     the STE rework merged last time around
   - Add unit tests for the new STE/CD manipulation logic

 - AMD-Vi:
   - Final part of SVA changes with generic IO page fault handling

 - Renesas IPMMU:
   - Add support for R8A779H0 hardware

 - A couple smaller fixes and updates across the sub-tree

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAmZHJMkACgkQK/BELZcB
GuND1Q/+M4RN5jM66XCfhqoP8QaI8I7zDlPDd14ismx0bjtOZhoiXpptKkAA8guo
7mS57MLqBw/hKYucm1mw+F1qi1HnRWSstKXiCPmzDm3UXYgZJlKkrOw6vydFeHJH
zx2ei7TmBrc0SrsybWK3NWRfVBBkO8enGZTmti0DfHL/rOFcUM0LHegY51GcDaaH
SlDr+LLDMeGynSQWhRlVNJVmEI5gpVPitY/mDUpVPoELiW9C0WGk8kPlR11z2pCR
eUNiqGJUcGasOhmfiYnpJR462eg7J41glquu+YHj8ivPbbu3C4wxgruY/tR4dmJG
8s6AMAWR53JzG2SrCCwtzyRPSXmKfvixF+VKmlB2Ksc7VAn1xA0DYnY5Tx99EtXu
qcEaR4SICMti0urmBGo/cGFdXi2TB1ccXqwoRtp1N3KiYnnOaQdLNO9qZdl9uUTI
uleXACzkCVSssSpBfGjFcPyHU4r3WjMfX0f5ZJPpFMoQmvwV1yeMX7xTEZz4Sxew
cHfBt9FAW9+4mBMTQfokBt0hZ6jwKcYl/z3Xi2oD+Ik/Qrzx5kcLA8LZLEVRXIBa
SZh2ASazq/dr8YoZ744VRmlmi+nISAIHbbQMeqQEQgYQh0HpwS9g5HtpsBzNP6aB
91RHqZSccb/zNdi8e+RH79Y7pX/G5QcuVKcW6KQUBcAAb6hAgOg=
=JUzp
-----END PGP SIGNATURE-----

Merge tag 'iommu-updates-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel (summary as in the tag message above,
"and a couple smaller fixes and updates across the sub-tree").

* tag 'iommu-updates-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (80 commits)
  iommu/arm-smmu-v3: Make the kunit into a module
  arm64: Properly clean up iommu-dma remnants
  iommu/amd: Enable Guest Translation after reading IOMMU feature register
  iommu/vt-d: Decouple igfx_off from graphic identity mapping
  iommu/amd: Fix compilation error
  iommu/arm-smmu-v3: Add unit tests for arm_smmu_write_entry
  iommu/arm-smmu-v3: Build the whole CD in arm_smmu_make_s1_cd()
  iommu/arm-smmu-v3: Move the CD generation for SVA into a function
  iommu/arm-smmu-v3: Allocate the CD table entry in advance
  iommu/arm-smmu-v3: Make arm_smmu_alloc_cd_ptr()
  iommu/arm-smmu-v3: Consolidate clearing a CD table entry
  iommu/arm-smmu-v3: Move the CD generation for S1 domains into a function
  iommu/arm-smmu-v3: Make CD programming use arm_smmu_write_entry()
  iommu/arm-smmu-v3: Add an ops indirection to the STE code
  iommu/arm-smmu-qcom: Don't build debug features as a kernel module
  iommu/amd: Add SVA domain support
  iommu: Add ops->domain_alloc_sva()
  iommu/amd: Initial SVA support for AMD IOMMU
  iommu/amd: Add support for enable/disable IOPF
  iommu/amd: Add IO page fault notifier handler
  ...
This commit is contained in: commit 0cc6f45cec
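The "IOMMU memory usage observability" item lands largely as a mechanical conversion in the driver hunks below: raw __get_free_pages()/free_pages() calls become the new iommu-pages helpers, which is what makes IO page-table memory show up in the sec_pagetables/SecPageTables counters touched in the documentation hunks. A minimal sketch of the conversion pattern; the surrounding functions here are illustrative placeholders, not from a specific driver, while the helpers themselves are the ones used throughout the AMD hunks later in this diff:

	#include "../iommu-pages.h"	/* iommu_alloc_pages(), iommu_free_pages() */

	/* Illustrative only: mirrors the pattern applied across the hunks below. */
	static void *example_alloc_table(size_t size)
	{
		/*
		 * Before this series:
		 *   return (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
		 *                                   get_order(size));
		 * The converted call sites drop __GFP_ZERO, so the helper is
		 * presumably zeroing (and accounting) the pages itself.
		 */
		return iommu_alloc_pages(GFP_KERNEL, get_order(size));
	}

	static void example_free_table(void *table, size_t size)
	{
		/* Before: free_pages((unsigned long)table, get_order(size)); */
		iommu_free_pages(table, get_order(size));
	}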
@@ -1435,7 +1435,7 @@ PAGE_SIZE multiple when read back.
   sec_pagetables
 	Amount of memory allocated for secondary page tables,
 	this currently includes KVM mmu allocations on x86
-	and arm64.
+	and arm64 and IOMMU page tables.
 
   percpu (npn)
 	Amount of memory used for storing per-cpu kernel
Documentation/devicetree/bindings/iommu/qcom,tbu.yaml (new file, 69 lines)
@@ -0,0 +1,69 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/iommu/qcom,tbu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Qualcomm TBU (Translation Buffer Unit)

maintainers:
  - Georgi Djakov <quic_c_gdjako@quicinc.com>

description:
  The Qualcomm SMMU500 implementation consists of TCU and TBU. The TBU contains
  a Translation Lookaside Buffer (TLB) that caches page tables. TBUs provides
  debug features to trace and trigger debug transactions. There are multiple TBU
  instances with each client core.

properties:
  compatible:
    enum:
      - qcom,sc7280-tbu
      - qcom,sdm845-tbu

  reg:
    maxItems: 1

  clocks:
    maxItems: 1

  interconnects:
    maxItems: 1

  power-domains:
    maxItems: 1

  qcom,stream-id-range:
    description: |
      Phandle of a SMMU device and Stream ID range (address and size) that
      is assigned by the TBU
    $ref: /schemas/types.yaml#/definitions/phandle-array
    items:
      - items:
          - description: phandle of a smmu node
          - description: stream id base address
          - description: stream id size

required:
  - compatible
  - reg
  - qcom,stream-id-range

additionalProperties: false

examples:
  - |
    #include <dt-bindings/clock/qcom,gcc-sdm845.h>
    #include <dt-bindings/interconnect/qcom,icc.h>
    #include <dt-bindings/interconnect/qcom,sdm845.h>

    tbu@150e1000 {
        compatible = "qcom,sdm845-tbu";
        reg = <0x150e1000 0x1000>;
        clocks = <&gcc GCC_AGGRE_NOC_PCIE_TBU_CLK>;
        interconnects = <&system_noc MASTER_GNOC_SNOC QCOM_ICC_TAG_ACTIVE_ONLY
                         &config_noc SLAVE_IMEM_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
        power-domains = <&gcc HLOS1_VOTE_AGGRE_NOC_MMU_PCIE_TBU_GDSC>;
        qcom,stream-id-range = <&apps_smmu 0x1c00 0x400>;
    };
...
@@ -50,6 +50,7 @@ properties:
               - renesas,ipmmu-r8a779a0  # R-Car V3U
               - renesas,ipmmu-r8a779f0  # R-Car S4-8
               - renesas,ipmmu-r8a779g0  # R-Car V4H
+              - renesas,ipmmu-r8a779h0  # R-Car V4M
           - const: renesas,rcar-gen4-ipmmu-vmsa # R-Car Gen4
 
   reg:
@@ -1110,8 +1110,8 @@ KernelStack
 PageTables
               Memory consumed by userspace page tables
 SecPageTables
-              Memory consumed by secondary page tables, this currently
-              currently includes KVM mmu allocations on x86 and arm64.
+              Memory consumed by secondary page tables, this currently includes
+              KVM mmu and IOMMU allocations on x86 and arm64.
 NFS_Unstable
               Always zero. Previous counted pages which had been written to
               the server, but has not been committed to stable storage.
@@ -90,8 +90,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 /*
  * Plug in direct dma map ops.
  */
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	/*
 	 * IOC hardware snoops all DMA traffic keeping the caches consistent
@@ -33,8 +33,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 	}
 }
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	if (IS_ENABLED(CONFIG_CPU_V7M)) {
 		/*
@@ -1709,11 +1709,15 @@ void arm_iommu_detach_device(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
+	u64 dma_base = 0, size = 1ULL << 32;
 
+	if (dev->dma_range_map) {
+		dma_base = dma_range_map_min(dev->dma_range_map);
+		size = dma_range_map_max(dev->dma_range_map) - dma_base;
+	}
 	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
 	if (IS_ERR(mapping)) {
 		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
@@ -1744,8 +1748,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 
 #else
 
-static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    bool coherent)
+static void arm_setup_iommu_dma_ops(struct device *dev)
 {
 }
 
@@ -1753,8 +1756,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
 
 #endif	/* CONFIG_ARM_DMA_USE_IOMMU */
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	/*
 	 * Due to legacy code that sets the ->dma_coherent flag from a bus
@@ -1774,7 +1776,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		return;
 
 	if (device_iommu_mapped(dev))
-		arm_setup_iommu_dma_ops(dev, dma_base, size, coherent);
+		arm_setup_iommu_dma_ops(dev);
 
 	xen_setup_dma_ops(dev);
 	dev->archdata.dma_ops_setup = true;
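Taken together, these hunks change the arch_setup_dma_ops() contract: callers now pass only the coherency flag, and any DMA window is derived from dev->dma_range_map via dma_range_map_min()/dma_range_map_max(), both visible in the hunk above. A runnable stand-alone illustration of that derivation, using a simplified bus_dma_region-like table; the struct and sample values here are illustrative, not kernel definitions:

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for struct bus_dma_region: table ends at size == 0. */
	struct region {
		uint64_t dma_start;
		uint64_t size;
	};

	static uint64_t range_map_min(const struct region *r)
	{
		uint64_t min = UINT64_MAX;

		for (; r->size; r++)
			if (r->dma_start < min)
				min = r->dma_start;
		return min;
	}

	static uint64_t range_map_max(const struct region *r)
	{
		/* Same walk as the open-coded loop removed in the ACPI hunks below. */
		uint64_t max = 0;

		for (; r->size; r++)
			if (r->dma_start + r->size - 1 > max)
				max = r->dma_start + r->size - 1;
		return max;
	}

	int main(void)
	{
		/* Two example windows, as a firmware dma-ranges table might describe. */
		struct region map[] = {
			{ .dma_start = 0x80000000, .size = 0x40000000 },
			{ .dma_start = 0x00000000, .size = 0x20000000 },
			{ 0 }
		};
		uint64_t dma_base = range_map_min(map);
		uint64_t size = range_map_max(map) - dma_base;

		/* This is the window the reworked arm_setup_iommu_dma_ops() derives. */
		printf("dma_base=%#llx size=%#llx\n",
		       (unsigned long long)dma_base, (unsigned long long)size);
		return 0;
	}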
@@ -46,7 +46,6 @@ config ARM64
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYSCALL_WRAPPER
-	select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAS_ZONE_DMA_SET if EXPERT
 	select ARCH_HAVE_ELF_PROT
@@ -7,7 +7,6 @@
 #include <linux/gfp.h>
 #include <linux/cache.h>
 #include <linux/dma-map-ops.h>
-#include <linux/iommu.h>
 #include <xen/xen.h>
 
 #include <asm/cacheflush.h>
@@ -39,15 +38,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 	dcache_clean_poc(start, start + size);
 }
 
-#ifdef CONFIG_IOMMU_DMA
-void arch_teardown_dma_ops(struct device *dev)
-{
-	dev->dma_ops = NULL;
-}
-#endif
-
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	int cls = cache_line_size_of_cpu();
 
@@ -58,8 +49,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			   ARCH_DMA_MINALIGN, cls);
 
 	dev->dma_coherent = coherent;
-	if (device_iommu_mapped(dev))
-		iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 
 	xen_setup_dma_ops(dev);
 }
@@ -8,17 +8,12 @@
 void acpi_arch_dma_setup(struct device *dev)
 {
 	int ret;
-	u64 mask, end = 0;
+	u64 mask, end;
 	const struct bus_dma_region *map = NULL;
 
 	ret = acpi_dma_get_range(dev, &map);
 	if (!ret && map) {
-		const struct bus_dma_region *r = map;
-
-		for (end = 0; r->size; r++) {
-			if (r->dma_start + r->size - 1 > end)
-				end = r->dma_start + r->size - 1;
-		}
+		end = dma_range_map_max(map);
 
 		mask = DMA_BIT_MASK(ilog2(end) + 1);
 		dev->bus_dma_limit = end;
@@ -137,8 +137,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 #endif
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	dev->dma_coherent = coherent;
 }
@@ -128,8 +128,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 	ALT_CMO_OP(FLUSH, flush_addr, size, riscv_cbom_block_size);
 }
 
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 	WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
 		   TAINT_CPU_OUT_OF_SPEC,
@@ -8,7 +8,6 @@ void acpi_arch_dma_setup(struct device *dev)
 {
 	int ret;
 	u64 end, mask;
-	u64 size = 0;
 	const struct bus_dma_region *map = NULL;
 
 	/*
@@ -23,31 +22,23 @@ void acpi_arch_dma_setup(struct device *dev)
 	}
 
 	if (dev->coherent_dma_mask)
-		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+		end = dev->coherent_dma_mask;
 	else
-		size = 1ULL << 32;
+		end = (1ULL << 32) - 1;
 
 	ret = acpi_dma_get_range(dev, &map);
 	if (!ret && map) {
-		const struct bus_dma_region *r = map;
-
-		for (end = 0; r->size; r++) {
-			if (r->dma_start + r->size - 1 > end)
-				end = r->dma_start + r->size - 1;
-		}
-
-		size = end + 1;
+		end = dma_range_map_max(map);
 		dev->dma_range_map = map;
 	}
 
 	if (ret == -ENODEV)
-		ret = iort_dma_get_ranges(dev, &size);
+		ret = iort_dma_get_ranges(dev, &end);
 	if (!ret) {
 		/*
 		 * Limit coherent and dma mask based on size retrieved from
 		 * firmware.
 		 */
-		end = size - 1;
 		mask = DMA_BIT_MASK(ilog2(end) + 1);
 		dev->bus_dma_limit = end;
 		dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
@@ -1367,7 +1367,7 @@ int iort_iommu_configure_id(struct device *dev, const u32 *input_id)
 { return -ENODEV; }
 #endif
 
-static int nc_dma_get_range(struct device *dev, u64 *size)
+static int nc_dma_get_range(struct device *dev, u64 *limit)
 {
 	struct acpi_iort_node *node;
 	struct acpi_iort_named_component *ncomp;
@@ -1384,13 +1384,13 @@ static int nc_dma_get_range(struct device *dev, u64 *size)
 		return -EINVAL;
 	}
 
-	*size = ncomp->memory_address_limit >= 64 ? U64_MAX :
-			1ULL<<ncomp->memory_address_limit;
+	*limit = ncomp->memory_address_limit >= 64 ? U64_MAX :
+			(1ULL << ncomp->memory_address_limit) - 1;
 
 	return 0;
 }
 
-static int rc_dma_get_range(struct device *dev, u64 *size)
+static int rc_dma_get_range(struct device *dev, u64 *limit)
 {
 	struct acpi_iort_node *node;
 	struct acpi_iort_root_complex *rc;
@@ -1408,8 +1408,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
 		return -EINVAL;
 	}
 
-	*size = rc->memory_address_limit >= 64 ? U64_MAX :
-			1ULL<<rc->memory_address_limit;
+	*limit = rc->memory_address_limit >= 64 ? U64_MAX :
+			(1ULL << rc->memory_address_limit) - 1;
 
 	return 0;
 }
@@ -1417,16 +1417,16 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
 /**
  * iort_dma_get_ranges() - Look up DMA addressing limit for the device
  * @dev: device to lookup
- * @size: DMA range size result pointer
+ * @limit: DMA limit result pointer
  *
  * Return: 0 on success, an error otherwise.
  */
-int iort_dma_get_ranges(struct device *dev, u64 *size)
+int iort_dma_get_ranges(struct device *dev, u64 *limit)
 {
 	if (dev_is_pci(dev))
-		return rc_dma_get_range(dev, size);
+		return rc_dma_get_range(dev, limit);
 	else
-		return nc_dma_get_range(dev, size);
+		return nc_dma_get_range(dev, limit);
 }
 
 static void __init acpi_iort_register_irq(int hwirq, const char *name,
|
||||
if (ret == -EPROBE_DEFER)
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
/*
|
||||
* Historically this routine doesn't fail driver probing due to errors
|
||||
* in acpi_iommu_configure_id().
|
||||
*/
|
||||
|
||||
arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT);
|
||||
arch_setup_dma_ops(dev, attr == DEV_DMA_COHERENT);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@@ -561,11 +561,7 @@ EXPORT_SYMBOL_GPL(hv_query_ext_cap);
 
 void hv_setup_dma_ops(struct device *dev, bool coherent)
 {
-	/*
-	 * Hyper-V does not offer a vIOMMU in the guest
-	 * VM, so pass 0/NULL for the IOMMU settings
-	 */
-	arch_setup_dma_ops(dev, 0, 0, coherent);
+	arch_setup_dma_ops(dev, coherent);
 }
 EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
 
@@ -376,13 +376,17 @@ config ARM_SMMU_QCOM
 
 config ARM_SMMU_QCOM_DEBUG
 	bool "ARM SMMU QCOM implementation defined debug support"
-	depends on ARM_SMMU_QCOM
+	depends on ARM_SMMU_QCOM=y
 	help
 	  Support for implementation specific debug features in ARM SMMU
-	  hardware found in QTI platforms.
+	  hardware found in QTI platforms. This include support for
+	  the Translation Buffer Units (TBU) that can be used to obtain
+	  additional information when debugging memory management issues
+	  like context faults.
 
-	  Say Y here to enable debug for issues such as TLB sync timeouts
-	  which requires implementation defined register dumps.
+	  Say Y here to enable debug for issues such as context faults
+	  or TLB sync timeouts which requires implementation defined
+	  register dumps.
 
 config ARM_SMMU_V3
 	tristate "ARM Ltd. System MMU Version 3 (SMMUv3) Support"
@@ -397,9 +401,9 @@ config ARM_SMMU_V3
 	  Say Y here if your system includes an IOMMU device implementing
 	  the ARM SMMUv3 architecture.
 
+if ARM_SMMU_V3
 config ARM_SMMU_V3_SVA
 	bool "Shared Virtual Addressing support for the ARM SMMUv3"
-	depends on ARM_SMMU_V3
 	select IOMMU_SVA
 	select IOMMU_IOPF
 	select MMU_NOTIFIER
@@ -410,6 +414,17 @@ config ARM_SMMU_V3_SVA
 	  Say Y here if your system supports SVA extensions such as PCIe PASID
 	  and PRI.
 
+config ARM_SMMU_V3_KUNIT_TEST
+	tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
+	depends on KUNIT
+	depends on ARM_SMMU_V3_SVA
+	default KUNIT_ALL_TESTS
+	help
+	  Enable this option to unit-test arm-smmu-v3 driver functions.
+
+	  If unsure, say N.
+endif
+
 config S390_IOMMU
 	def_bool y if S390 && PCI
 	depends on S390 && PCI
@@ -7,9 +7,12 @@ config AMD_IOMMU
 	select PCI_ATS
 	select PCI_PRI
 	select PCI_PASID
+	select MMU_NOTIFIER
 	select IOMMU_API
 	select IOMMU_IOVA
 	select IOMMU_IO_PGTABLE
+	select IOMMU_SVA
+	select IOMMU_IOPF
 	select IOMMUFD_DRIVER if IOMMUFD
 	depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
 	help
@@ -1,3 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o
+obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
@@ -17,10 +17,16 @@ irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
 irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
 irqreturn_t amd_iommu_int_handler(int irq, void *data);
 void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
+void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
+			   u8 cntrl_intr, u8 cntrl_log,
+			   u32 status_run_mask, u32 status_overflow_mask);
 void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
 void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
 void amd_iommu_restart_ppr_log(struct amd_iommu *iommu);
 void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
+void iommu_feature_enable(struct amd_iommu *iommu, u8 bit);
+void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
+				  gfp_t gfp, size_t size);
 
 #ifdef CONFIG_AMD_IOMMU_DEBUGFS
 void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
@@ -33,22 +39,47 @@ int amd_iommu_prepare(void);
 int amd_iommu_enable(void);
 void amd_iommu_disable(void);
 int amd_iommu_reenable(int mode);
-int amd_iommu_enable_faulting(void);
+int amd_iommu_enable_faulting(unsigned int cpu);
 extern int amd_iommu_guest_ir;
 extern enum io_pgtable_fmt amd_iommu_pgtable;
 extern int amd_iommu_gpt_level;
 
-bool amd_iommu_v2_supported(void);
+/* Protection domain ops */
+struct protection_domain *protection_domain_alloc(unsigned int type);
+void protection_domain_free(struct protection_domain *domain);
+struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
+						struct mm_struct *mm);
+void amd_iommu_domain_free(struct iommu_domain *dom);
+int iommu_sva_set_dev_pasid(struct iommu_domain *domain,
+			    struct device *dev, ioasid_t pasid);
+void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
+				struct iommu_domain *domain);
 
 /* Device capabilities */
 int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
 void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
+/* SVA/PASID */
+bool amd_iommu_pasid_supported(void);
+
+/* IOPF */
+int amd_iommu_iopf_init(struct amd_iommu *iommu);
+void amd_iommu_iopf_uninit(struct amd_iommu *iommu);
+void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
+			     struct iommu_page_response *resp);
+int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
+			      struct iommu_dev_data *dev_data);
+void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
+				  struct iommu_dev_data *dev_data);
 
 /* GCR3 setup */
 int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
		       ioasid_t pasid, unsigned long gcr3);
 int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);
 
+/* PPR */
+int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu);
+void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu);
+void amd_iommu_enable_ppr_log(struct amd_iommu *iommu);
+void amd_iommu_poll_ppr_log(struct amd_iommu *iommu);
+int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag);
+
 /*
  * This function flushes all internal caches of
  * the IOMMU used by this driver.
@@ -56,6 +87,7 @@ int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);
 void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
 void amd_iommu_domain_update(struct protection_domain *domain);
+void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set);
 void amd_iommu_domain_flush_complete(struct protection_domain *domain);
 void amd_iommu_domain_flush_pages(struct protection_domain *domain,
				  u64 address, size_t size);
@@ -73,9 +105,6 @@ static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 }
 #endif
 
-int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
-			   int status, int tag);
-
 static inline bool is_rd890_iommu(struct pci_dev *pdev)
 {
 	return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
@@ -134,14 +163,6 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev)
 	return PCI_SEG_DEVID_TO_SBDF(seg, devid);
 }
 
-static inline void *alloc_pgtable_page(int nid, gfp_t gfp)
-{
-	struct page *page;
-
-	page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0);
-	return page ? page_address(page) : NULL;
-}
-
 /*
  * This must be called after device probe completes. During probe
  * use rlookup_amd_iommu() get the iommu.
@@ -157,6 +178,11 @@ static inline struct amd_iommu *get_amd_iommu_from_dev_data(struct iommu_dev_dat
 	return iommu_get_iommu_dev(dev_data->dev, struct amd_iommu, iommu);
 }
 
+static inline struct protection_domain *to_pdomain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct protection_domain, domain);
+}
+
 bool translation_pre_enabled(struct amd_iommu *iommu);
 bool amd_iommu_is_attach_deferred(struct device *dev);
 int __init add_special_device(u8 type, u8 id, u32 *devid, bool cmd_line);
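The new declarations above outline the IOPF life cycle that the AMD driver wires up later in this diff: a per-IOMMU fault queue is created at init time and PRI-capable devices are attached to it when they join an SVA-capable domain. A rough, illustrative ordering only, based on these declarations and the init.c/iommu.c hunks below; "example_iopf_lifecycle" is a hypothetical function, not part of this series, and error handling is trimmed:

	/*
	 * Illustrative only: the call order implied by the declarations above
	 * and by the iommu_init_pci()/do_attach() hunks later in this diff.
	 */
	static int example_iopf_lifecycle(struct amd_iommu *iommu,
					  struct iommu_dev_data *dev_data)
	{
		int ret;

		/* Probe time: one IOPF queue per IOMMU. */
		ret = amd_iommu_iopf_init(iommu);
		if (ret)
			return ret;

		/* Attach time: add a PRI-capable device to the queue. */
		ret = amd_iommu_iopf_add_device(iommu, dev_data);
		if (ret)
			return ret;	/* the driver only disables PRI, it does not fail attach */

		/* Detach/teardown mirror the above. */
		amd_iommu_iopf_remove_device(iommu, dev_data);
		amd_iommu_iopf_uninit(iommu);
		return 0;
	}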
|
@@ -8,7 +8,9 @@
 #ifndef _ASM_X86_AMD_IOMMU_TYPES_H
 #define _ASM_X86_AMD_IOMMU_TYPES_H
 
+#include <linux/iommu.h>
 #include <linux/types.h>
+#include <linux/mmu_notifier.h>
 #include <linux/mutex.h>
 #include <linux/msi.h>
 #include <linux/list.h>
@@ -251,6 +253,14 @@
 #define PPR_ENTRY_SIZE		16
 #define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
 
+/* PAGE_SERVICE_REQUEST PPR Log Buffer Entry flags */
+#define PPR_FLAG_EXEC		0x002	/* Execute permission requested */
+#define PPR_FLAG_READ		0x004	/* Read permission requested */
+#define PPR_FLAG_WRITE		0x020	/* Write permission requested */
+#define PPR_FLAG_US		0x040	/* 1: User, 0: Supervisor */
+#define PPR_FLAG_RVSD		0x080	/* Reserved bit not zero */
+#define PPR_FLAG_GN		0x100	/* GVA and PASID is valid */
+
 #define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
 #define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
 #define PPR_DEVID(x)		((x) & 0xffffULL)
@@ -503,6 +513,11 @@ extern struct kmem_cache *amd_iommu_irq_cache;
 	list_for_each_entry((iommu), &amd_iommu_list, list)
 #define for_each_iommu_safe(iommu, next) \
 	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)
+/* Making iterating over protection_domain->dev_data_list easier */
+#define for_each_pdom_dev_data(pdom_dev_data, pdom) \
+	list_for_each_entry(pdom_dev_data, &pdom->dev_data_list, list)
+#define for_each_pdom_dev_data_safe(pdom_dev_data, next, pdom) \
+	list_for_each_entry_safe((pdom_dev_data), (next), &pdom->dev_data_list, list)
 
 struct amd_iommu;
 struct iommu_domain;
@@ -544,6 +559,16 @@ enum protection_domain_mode {
 	PD_MODE_V2,
 };
 
+/* Track dev_data/PASID list for the protection domain */
+struct pdom_dev_data {
+	/* Points to attached device data */
+	struct iommu_dev_data *dev_data;
+	/* PASID attached to the protection domain */
+	ioasid_t pasid;
+	/* For protection_domain->dev_data_list */
+	struct list_head list;
+};
+
 /*
  * This structure contains generic data for  IOMMU protection domains
  * independent of their use.
@@ -560,6 +585,9 @@ struct protection_domain {
 	bool dirty_tracking;	/* dirty tracking is enabled in the domain */
 	unsigned dev_cnt;	/* devices assigned to this domain */
 	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
+
+	struct mmu_notifier mn;	/* mmu notifier for the SVA domain */
+	struct list_head dev_data_list; /* List of pdom_dev_data */
 };
 
 /*
@@ -762,6 +790,10 @@ struct amd_iommu {
 	/* DebugFS Info */
 	struct dentry *debugfs;
 #endif
+
+	/* IOPF support */
+	struct iopf_queue *iopf_queue;
+	unsigned char iopfq_name[32];
 };
 
 static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
@@ -813,6 +845,7 @@ struct iommu_dev_data {
 	struct device *dev;
 	u16 devid;			/* PCI Device ID */
 
+	u32 max_pasids;			/* Max supported PASIDs */
 	u32 flags;			/* Holds AMD_IOMMU_DEVICE_FLAG_<*> */
 	int ats_qdep;
 	u8 ats_enabled  :1;		/* ATS state */
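The new PPR_FLAG_* definitions sit alongside the existing PPR_REQ_TYPE()/PPR_FLAGS()/PPR_DEVID() accessors shown as context above. A small, self-contained sketch of how the first quadword of a raw PPR log entry decodes with these masks; the sample value is made up purely for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Copied from the header hunk above. */
	#define PPR_FLAG_READ	0x004
	#define PPR_FLAG_WRITE	0x020
	#define PPR_FLAG_GN	0x100

	#define PPR_REQ_TYPE(x)	(((x) >> 60) & 0xfULL)
	#define PPR_FLAGS(x)	(((x) >> 48) & 0xfffULL)
	#define PPR_DEVID(x)	((x) & 0xffffULL)

	int main(void)
	{
		/* Hypothetical raw[0] of a PAGE_SERVICE_REQUEST log entry. */
		uint64_t raw0 = (1ULL << 60) |					/* request type */
				((uint64_t)(PPR_FLAG_READ | PPR_FLAG_WRITE |
					    PPR_FLAG_GN) << 48) |		/* flags */
				0x1234;						/* device ID */

		printf("type=%llu devid=0x%04llx read=%d write=%d gn=%d\n",
		       (unsigned long long)PPR_REQ_TYPE(raw0),
		       (unsigned long long)PPR_DEVID(raw0),
		       !!(PPR_FLAGS(raw0) & PPR_FLAG_READ),
		       !!(PPR_FLAGS(raw0) & PPR_FLAG_WRITE),
		       !!(PPR_FLAGS(raw0) & PPR_FLAG_GN));
		return 0;
	}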
@@ -36,6 +36,7 @@
 
 #include "amd_iommu.h"
 #include "../irq_remapping.h"
+#include "../iommu-pages.h"
 
 /*
  * definitions for the ACPI scanning code
@@ -419,7 +420,7 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
 }
 
 /* Generic functions to enable/disable certain features of the IOMMU. */
-static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
+void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
 {
 	u64 ctrl;
 
@@ -649,8 +650,8 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_
 /* Allocate per PCI segment device table */
 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	pci_seg->dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
-						      get_order(pci_seg->dev_table_size));
+	pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
+					       get_order(pci_seg->dev_table_size));
 	if (!pci_seg->dev_table)
 		return -ENOMEM;
 
@@ -659,17 +660,16 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
 
 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	free_pages((unsigned long)pci_seg->dev_table,
-		   get_order(pci_seg->dev_table_size));
+	iommu_free_pages(pci_seg->dev_table,
+			 get_order(pci_seg->dev_table_size));
 	pci_seg->dev_table = NULL;
 }
 
 /* Allocate per PCI segment IOMMU rlookup table. */
 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	pci_seg->rlookup_table = (void *)__get_free_pages(
-						GFP_KERNEL | __GFP_ZERO,
-						get_order(pci_seg->rlookup_table_size));
+	pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL,
+						   get_order(pci_seg->rlookup_table_size));
 	if (pci_seg->rlookup_table == NULL)
 		return -ENOMEM;
 
@@ -678,16 +678,15 @@ static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 
 static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	free_pages((unsigned long)pci_seg->rlookup_table,
-		   get_order(pci_seg->rlookup_table_size));
+	iommu_free_pages(pci_seg->rlookup_table,
+			 get_order(pci_seg->rlookup_table_size));
 	pci_seg->rlookup_table = NULL;
 }
 
 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	pci_seg->irq_lookup_table = (void *)__get_free_pages(
-						GFP_KERNEL | __GFP_ZERO,
-						get_order(pci_seg->rlookup_table_size));
+	pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL,
+						      get_order(pci_seg->rlookup_table_size));
 	kmemleak_alloc(pci_seg->irq_lookup_table,
 		       pci_seg->rlookup_table_size, 1, GFP_KERNEL);
 	if (pci_seg->irq_lookup_table == NULL)
@@ -699,8 +698,8 @@ static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_se
 static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg)
 {
 	kmemleak_free(pci_seg->irq_lookup_table);
-	free_pages((unsigned long)pci_seg->irq_lookup_table,
-		   get_order(pci_seg->rlookup_table_size));
+	iommu_free_pages(pci_seg->irq_lookup_table,
+			 get_order(pci_seg->rlookup_table_size));
 	pci_seg->irq_lookup_table = NULL;
 }
 
@@ -708,8 +707,8 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
 {
 	int i;
 
-	pci_seg->alias_table = (void *)__get_free_pages(GFP_KERNEL,
-							get_order(pci_seg->alias_table_size));
+	pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL,
+						 get_order(pci_seg->alias_table_size));
 	if (!pci_seg->alias_table)
 		return -ENOMEM;
 
@@ -724,8 +723,8 @@ static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
 
 static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
 {
-	free_pages((unsigned long)pci_seg->alias_table,
-		   get_order(pci_seg->alias_table_size));
+	iommu_free_pages(pci_seg->alias_table,
+			 get_order(pci_seg->alias_table_size));
 	pci_seg->alias_table = NULL;
 }
 
@@ -736,8 +735,8 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
  */
 static int __init alloc_command_buffer(struct amd_iommu *iommu)
 {
-	iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						  get_order(CMD_BUFFER_SIZE));
+	iommu->cmd_buf = iommu_alloc_pages(GFP_KERNEL,
+					   get_order(CMD_BUFFER_SIZE));
 
 	return iommu->cmd_buf ? 0 : -ENOMEM;
 }
@@ -746,9 +745,9 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu)
  * Interrupt handler has processed all pending events and adjusted head
  * and tail pointer. Reset overflow mask and restart logging again.
  */
-static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
-				  u8 cntrl_intr, u8 cntrl_log,
-				  u32 status_run_mask, u32 status_overflow_mask)
+void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
+			   u8 cntrl_intr, u8 cntrl_log,
+			   u32 status_run_mask, u32 status_overflow_mask)
 {
 	u32 status;
 
@@ -789,17 +788,6 @@ void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
 			      MMIO_STATUS_GALOG_OVERFLOW_MASK);
 }
 
-/*
- * This function restarts ppr logging in case the IOMMU experienced
- * PPR log overflow.
- */
-void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
-{
-	amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
-			      CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK,
-			      MMIO_STATUS_PPR_OVERFLOW_MASK);
-}
-
 /*
  * This function resets the command buffer if the IOMMU stopped fetching
  * commands from it.
@@ -845,19 +833,19 @@ static void iommu_disable_command_buffer(struct amd_iommu *iommu)
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
-	free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
+	iommu_free_pages(iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
 }
 
-static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
-					 gfp_t gfp, size_t size)
+void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp,
+				  size_t size)
 {
 	int order = get_order(size);
-	void *buf = (void *)__get_free_pages(gfp, order);
+	void *buf = iommu_alloc_pages(gfp, order);
 
 	if (buf &&
 	    check_feature(FEATURE_SNP) &&
 	    set_memory_4k((unsigned long)buf, (1 << order))) {
-		free_pages((unsigned long)buf, order);
+		iommu_free_pages(buf, order);
 		buf = NULL;
 	}
 
@@ -867,7 +855,7 @@ static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
 /* allocates the memory where the IOMMU will log its events to */
 static int __init alloc_event_buffer(struct amd_iommu *iommu)
 {
-	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
+	iommu->evt_buf = iommu_alloc_4k_pages(iommu, GFP_KERNEL,
 					      EVT_BUFFER_SIZE);
 
 	return iommu->evt_buf ? 0 : -ENOMEM;
@@ -901,50 +889,14 @@ static void iommu_disable_event_buffer(struct amd_iommu *iommu)
 
 static void __init free_event_buffer(struct amd_iommu *iommu)
 {
-	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
-}
-
-/* allocates the memory where the IOMMU will log its events to */
-static int __init alloc_ppr_log(struct amd_iommu *iommu)
-{
-	iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
-					      PPR_LOG_SIZE);
-
-	return iommu->ppr_log ? 0 : -ENOMEM;
-}
-
-static void iommu_enable_ppr_log(struct amd_iommu *iommu)
-{
-	u64 entry;
-
-	if (iommu->ppr_log == NULL)
-		return;
-
-	iommu_feature_enable(iommu, CONTROL_PPR_EN);
-
-	entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
-
-	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
-		    &entry, sizeof(entry));
-
-	/* set head and tail to zero manually */
-	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
-	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
-
-	iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
-	iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
-}
-
-static void __init free_ppr_log(struct amd_iommu *iommu)
-{
-	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
+	iommu_free_pages(iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
 }
 
 static void free_ga_log(struct amd_iommu *iommu)
 {
 #ifdef CONFIG_IRQ_REMAP
-	free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE));
-	free_pages((unsigned long)iommu->ga_log_tail, get_order(8));
+	iommu_free_pages(iommu->ga_log, get_order(GA_LOG_SIZE));
+	iommu_free_pages(iommu->ga_log_tail, get_order(8));
 #endif
 }
 
@@ -989,13 +941,11 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
 	if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
 		return 0;
 
-	iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					get_order(GA_LOG_SIZE));
+	iommu->ga_log = iommu_alloc_pages(GFP_KERNEL, get_order(GA_LOG_SIZE));
 	if (!iommu->ga_log)
 		goto err_out;
 
-	iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					get_order(8));
+	iommu->ga_log_tail = iommu_alloc_pages(GFP_KERNEL, get_order(8));
 	if (!iommu->ga_log_tail)
 		goto err_out;
 
@@ -1008,7 +958,7 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
 
 static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
 {
-	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO, 1);
+	iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
 
 	return iommu->cmd_sem ? 0 : -ENOMEM;
 }
@@ -1016,7 +966,7 @@ static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
 static void __init free_cwwb_sem(struct amd_iommu *iommu)
 {
 	if (iommu->cmd_sem)
-		free_page((unsigned long)iommu->cmd_sem);
+		iommu_free_page((void *)iommu->cmd_sem);
 }
 
 static void iommu_enable_xt(struct amd_iommu *iommu)
@@ -1081,7 +1031,6 @@ static bool __copy_device_table(struct amd_iommu *iommu)
 	u32 lo, hi, devid, old_devtb_size;
 	phys_addr_t old_devtb_phys;
 	u16 dom_id, dte_v, irq_v;
-	gfp_t gfp_flag;
 	u64 tmp;
 
 	/* Each IOMMU use separate device table with the same size */
@@ -1115,9 +1064,8 @@ static bool __copy_device_table(struct amd_iommu *iommu)
 	if (!old_devtb)
 		return false;
 
-	gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
-	pci_seg->old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
-							    get_order(pci_seg->dev_table_size));
+	pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
+						     get_order(pci_seg->dev_table_size));
 	if (pci_seg->old_dev_tbl_cpy == NULL) {
 		pr_err("Failed to allocate memory for copying old device table!\n");
 		memunmap(old_devtb);
@@ -1683,9 +1631,10 @@ static void __init free_iommu_one(struct amd_iommu *iommu)
 	free_cwwb_sem(iommu);
 	free_command_buffer(iommu);
 	free_event_buffer(iommu);
-	free_ppr_log(iommu);
+	amd_iommu_free_ppr_log(iommu);
 	free_ga_log(iommu);
 	iommu_unmap_mmio_space(iommu);
+	amd_iommu_iopf_uninit(iommu);
 }
 
 static void __init free_iommu_all(void)
@@ -2097,9 +2046,11 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 			amd_iommu_max_glx_val = glxval;
 		else
 			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+
+		iommu_enable_gt(iommu);
 	}
 
-	if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
+	if (check_feature(FEATURE_PPR) && amd_iommu_alloc_ppr_log(iommu))
 		return -ENOMEM;
 
 	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
@@ -2155,6 +2106,16 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 	if (ret)
 		return ret;
 
+	/*
+	 * Allocate per IOMMU IOPF queue here so that in attach device path,
+	 * PRI capable device can be added to IOPF queue
+	 */
+	if (amd_iommu_gt_ppr_supported()) {
+		ret = amd_iommu_iopf_init(iommu);
+		if (ret)
+			return ret;
+	}
+
 	iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
 
 	return pci_enable_device(iommu->dev);
@@ -2773,7 +2734,6 @@ static void early_enable_iommu(struct amd_iommu *iommu)
 	iommu_enable_command_buffer(iommu);
 	iommu_enable_event_buffer(iommu);
 	iommu_set_exclusion_range(iommu);
-	iommu_enable_gt(iommu);
 	iommu_enable_ga(iommu);
 	iommu_enable_xt(iommu);
 	iommu_enable_irtcachedis(iommu);
@@ -2805,8 +2765,8 @@ static void early_enable_iommus(void)
 
 		for_each_pci_segment(pci_seg) {
 			if (pci_seg->old_dev_tbl_cpy != NULL) {
-				free_pages((unsigned long)pci_seg->old_dev_tbl_cpy,
-						get_order(pci_seg->dev_table_size));
+				iommu_free_pages(pci_seg->old_dev_tbl_cpy,
+						 get_order(pci_seg->dev_table_size));
 				pci_seg->old_dev_tbl_cpy = NULL;
 			}
 		}
@@ -2819,8 +2779,8 @@ static void early_enable_iommus(void)
 		pr_info("Copied DEV table from previous kernel.\n");
 
 		for_each_pci_segment(pci_seg) {
-			free_pages((unsigned long)pci_seg->dev_table,
-				   get_order(pci_seg->dev_table_size));
+			iommu_free_pages(pci_seg->dev_table,
+					 get_order(pci_seg->dev_table_size));
 			pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
 		}
 
@@ -2830,7 +2790,6 @@ static void early_enable_iommus(void)
 			iommu_disable_irtcachedis(iommu);
 			iommu_enable_command_buffer(iommu);
 			iommu_enable_event_buffer(iommu);
-			iommu_enable_gt(iommu);
 			iommu_enable_ga(iommu);
 			iommu_enable_xt(iommu);
 			iommu_enable_irtcachedis(iommu);
@@ -2840,12 +2799,15 @@ static void early_enable_iommus(void)
 	}
 }
 
-static void enable_iommus_v2(void)
+static void enable_iommus_ppr(void)
 {
 	struct amd_iommu *iommu;
 
+	if (!amd_iommu_gt_ppr_supported())
+		return;
+
 	for_each_iommu(iommu)
-		iommu_enable_ppr_log(iommu);
+		amd_iommu_enable_ppr_log(iommu);
 }
 
 static void enable_iommus_vapic(void)
@@ -3022,8 +2984,8 @@ static bool __init check_ioapic_information(void)
 
 static void __init free_dma_resources(void)
 {
-	free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
-		   get_order(MAX_DOMAIN_ID/8));
+	iommu_free_pages(amd_iommu_pd_alloc_bitmap,
+			 get_order(MAX_DOMAIN_ID / 8));
 	amd_iommu_pd_alloc_bitmap = NULL;
 
 	free_unity_maps();
@@ -3095,9 +3057,8 @@ static int __init early_amd_iommu_init(void)
 	/* Device table - directly used by all IOMMUs */
 	ret = -ENOMEM;
 
-	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
-					    GFP_KERNEL | __GFP_ZERO,
-					    get_order(MAX_DOMAIN_ID/8));
+	amd_iommu_pd_alloc_bitmap = iommu_alloc_pages(GFP_KERNEL,
+						      get_order(MAX_DOMAIN_ID / 8));
 	if (amd_iommu_pd_alloc_bitmap == NULL)
 		goto out;
 
@@ -3181,7 +3142,7 @@ static int amd_iommu_enable_interrupts(void)
 	 * PPR and GA log interrupt for all IOMMUs.
 	 */
 	enable_iommus_vapic();
-	enable_iommus_v2();
+	enable_iommus_ppr();
 
 out:
 	return ret;
@@ -3392,7 +3353,7 @@ int amd_iommu_reenable(int mode)
 	return 0;
 }
 
-int __init amd_iommu_enable_faulting(void)
+int __init amd_iommu_enable_faulting(unsigned int cpu)
 {
 	/* We enable MSI later when PCI is initialized */
 	return 0;
@@ -3690,7 +3651,7 @@ __setup("ivrs_ioapic",		parse_ivrs_ioapic);
 __setup("ivrs_hpet",		parse_ivrs_hpet);
 __setup("ivrs_acpihid",		parse_ivrs_acpihid);
 
-bool amd_iommu_v2_supported(void)
+bool amd_iommu_pasid_supported(void)
 {
 	/* CPU page table size should match IOMMU guest page table size */
 	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
@@ -22,6 +22,7 @@
 
 #include "amd_iommu_types.h"
 #include "amd_iommu.h"
+#include "../iommu-pages.h"
 
 static void v1_tlb_flush_all(void *cookie)
 {
@@ -156,7 +157,7 @@ static bool increase_address_space(struct protection_domain *domain,
 	bool ret = true;
 	u64 *pte;
 
-	pte = alloc_pgtable_page(domain->nid, gfp);
+	pte = iommu_alloc_page_node(domain->nid, gfp);
 	if (!pte)
 		return false;
 
@@ -187,7 +188,7 @@ static bool increase_address_space(struct protection_domain *domain,
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
-	free_page((unsigned long)pte);
+	iommu_free_page(pte);
 
 	return ret;
 }
@@ -250,7 +251,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
 
 		if (!IOMMU_PTE_PRESENT(__pte) ||
 		    pte_level == PAGE_MODE_NONE) {
-			page = alloc_pgtable_page(domain->nid, gfp);
+			page = iommu_alloc_page_node(domain->nid, gfp);
 
 			if (!page)
 				return NULL;
@@ -259,7 +260,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
 
 			/* pte could have been changed somewhere. */
 			if (!try_cmpxchg64(pte, &__pte, __npte))
-				free_page((unsigned long)page);
+				iommu_free_page(page);
 			else if (IOMMU_PTE_PRESENT(__pte))
 				*updated = true;
 
@@ -431,7 +432,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 	}
 
 	/* Everything flushed out, free pages now */
-	put_pages_list(&freelist);
+	iommu_put_pages_list(&freelist);
 
 	return ret;
 }
@@ -580,7 +581,7 @@ static void v1_free_pgtable(struct io_pgtable *iop)
 	/* Make changes visible to IOMMUs */
 	amd_iommu_domain_update(dom);
 
-	put_pages_list(&freelist);
+	iommu_put_pages_list(&freelist);
 }
 
 static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
@@ -18,6 +18,7 @@
 
 #include "amd_iommu_types.h"
 #include "amd_iommu.h"
+#include "../iommu-pages.h"
 
 #define IOMMU_PAGE_PRESENT	BIT_ULL(0)	/* Is present */
 #define IOMMU_PAGE_RW		BIT_ULL(1)	/* Writeable */
@@ -99,11 +100,6 @@ static inline int page_size_to_level(u64 pg_size)
 	return PAGE_MODE_1_LEVEL;
 }
 
-static inline void free_pgtable_page(u64 *pt)
-{
-	free_page((unsigned long)pt);
-}
-
 static void free_pgtable(u64 *pt, int level)
 {
 	u64 *p;
@@ -125,10 +121,10 @@ static void free_pgtable(u64 *pt, int level)
 		if (level > 2)
 			free_pgtable(p, level - 1);
 		else
-			free_pgtable_page(p);
+			iommu_free_page(p);
 	}
 
-	free_pgtable_page(pt);
+	iommu_free_page(pt);
 }
 
 /* Allocate page table */
@@ -156,14 +152,14 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
 	}
 
 	if (!IOMMU_PTE_PRESENT(__pte)) {
-		page = alloc_pgtable_page(nid, gfp);
+		page = iommu_alloc_page_node(nid, gfp);
 		if (!page)
 			return NULL;
 
 		__npte = set_pgtable_attr(page);
 		/* pte could have been changed somewhere. */
 		if (cmpxchg64(pte, __pte, __npte) != __pte)
-			free_pgtable_page(page);
+			iommu_free_page(page);
 		else if (IOMMU_PTE_PRESENT(__pte))
 			*updated = true;
 
@@ -185,7 +181,7 @@ static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
 		if (pg_size == IOMMU_PAGE_SIZE_1G)
 			free_pgtable(__pte, end_level - 1);
 		else if (pg_size == IOMMU_PAGE_SIZE_2M)
-			free_pgtable_page(__pte);
+			iommu_free_page(__pte);
 	}
 
 	return pte;
@@ -366,7 +362,7 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
 	struct protection_domain *pdom = (struct protection_domain *)cookie;
 	int ias = IOMMU_IN_ADDR_BIT_SIZE;
 
-	pgtable->pgd = alloc_pgtable_page(pdom->nid, GFP_ATOMIC);
+	pgtable->pgd = iommu_alloc_page_node(pdom->nid, GFP_ATOMIC);
 	if (!pgtable->pgd)
 		return NULL;
 
@ -42,6 +42,7 @@
|
||||
#include "amd_iommu.h"
|
||||
#include "../dma-iommu.h"
|
||||
#include "../irq_remapping.h"
|
||||
#include "../iommu-pages.h"
|
||||
|
||||
#define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
|
||||
|
||||
@ -89,6 +90,21 @@ static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
|
||||
return (pdom && (pdom->pd_mode == PD_MODE_V2));
|
||||
}
|
||||
|
||||
static inline bool pdom_is_in_pt_mode(struct protection_domain *pdom)
|
||||
{
|
||||
return (pdom->domain.type == IOMMU_DOMAIN_IDENTITY);
|
||||
}
|
||||
|
||||
/*
|
||||
* We cannot support PASID w/ existing v1 page table in the same domain
|
||||
* since it will be nested. However, existing domain w/ v2 page table
|
||||
* or passthrough mode can be used for PASID.
|
||||
*/
|
||||
static inline bool pdom_is_sva_capable(struct protection_domain *pdom)
|
||||
{
|
||||
return pdom_is_v2_pgtbl_mode(pdom) || pdom_is_in_pt_mode(pdom);
|
||||
}
|
||||
|
||||
static inline int get_acpihid_device_id(struct device *dev,
|
||||
struct acpihid_map_entry **entry)
|
||||
{
|
||||
@ -179,11 +195,6 @@ static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
|
||||
return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
|
||||
}
|
||||
|
||||
static struct protection_domain *to_pdomain(struct iommu_domain *dom)
|
||||
{
|
||||
return container_of(dom, struct protection_domain, domain);
|
||||
}
|
||||
|
||||
static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
|
||||
{
|
||||
struct iommu_dev_data *dev_data;
|
||||
@ -384,7 +395,7 @@ static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
|
||||
}
|
||||
}
|
||||
|
||||
int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
|
||||
static inline int pdev_enable_cap_pri(struct pci_dev *pdev)
|
||||
{
|
||||
struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
|
||||
int ret = -EINVAL;
|
||||
@ -392,6 +403,9 @@ int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
|
||||
if (dev_data->pri_enabled)
|
||||
return 0;
|
||||
|
||||
if (!dev_data->ats_enabled)
|
||||
return 0;
|
||||
|
||||
if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
|
||||
/*
|
||||
* First reset the PRI state of the device.
|
||||
@ -408,7 +422,7 @@ int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev)
|
||||
static inline void pdev_disable_cap_pri(struct pci_dev *pdev)
|
||||
{
|
||||
struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
|
||||
|
||||
@ -450,15 +464,14 @@ static void pdev_enable_caps(struct pci_dev *pdev)
|
||||
{
|
||||
pdev_enable_cap_ats(pdev);
|
||||
pdev_enable_cap_pasid(pdev);
|
||||
amd_iommu_pdev_enable_cap_pri(pdev);
|
||||
|
||||
pdev_enable_cap_pri(pdev);
|
||||
}
|
||||
|
||||
static void pdev_disable_caps(struct pci_dev *pdev)
|
||||
{
|
||||
pdev_disable_cap_ats(pdev);
|
||||
pdev_disable_cap_pasid(pdev);
|
||||
amd_iommu_pdev_disable_cap_pri(pdev);
|
||||
pdev_disable_cap_pri(pdev);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -818,59 +831,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
|
||||
writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
|
||||
}
|
||||
|
||||
static void iommu_poll_ppr_log(struct amd_iommu *iommu)
|
||||
{
|
||||
u32 head, tail;
|
||||
|
||||
if (iommu->ppr_log == NULL)
|
||||
return;
|
||||
|
||||
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
|
||||
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
|
||||
|
||||
while (head != tail) {
|
||||
volatile u64 *raw;
|
||||
u64 entry[2];
|
||||
int i;
|
||||
|
||||
raw = (u64 *)(iommu->ppr_log + head);
|
||||
|
||||
/*
|
||||
* Hardware bug: Interrupt may arrive before the entry is
|
||||
* written to memory. If this happens we need to wait for the
|
||||
* entry to arrive.
|
||||
*/
|
||||
for (i = 0; i < LOOP_TIMEOUT; ++i) {
|
||||
if (PPR_REQ_TYPE(raw[0]) != 0)
|
||||
break;
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
/* Avoid memcpy function-call overhead */
|
||||
entry[0] = raw[0];
|
||||
entry[1] = raw[1];
|
||||
|
||||
/*
|
||||
* To detect the hardware errata 733 we need to clear the
|
||||
* entry back to zero. This issue does not exist on SNP
|
||||
* enabled system. Also this buffer is not writeable on
|
||||
* SNP enabled system.
|
||||
*/
|
||||
if (!amd_iommu_snp_en)
|
||||
raw[0] = raw[1] = 0UL;
|
||||
|
||||
/* Update head pointer of hardware ring-buffer */
|
||||
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
|
||||
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
|
||||
|
||||
/* TODO: PPR Handler will be added when we add IOPF support */
|
||||
|
||||
/* Refresh ring-buffer information */
|
||||
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
|
||||
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IRQ_REMAP
|
||||
static int (*iommu_ga_log_notifier)(u32);
|
||||
|
||||
@ -991,7 +951,7 @@ irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
|
||||
{
|
||||
amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
|
||||
MMIO_STATUS_PPR_OVERFLOW_MASK,
|
||||
iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
|
||||
amd_iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@ -1664,15 +1624,14 @@ void amd_iommu_domain_update(struct protection_domain *domain)
|
||||
amd_iommu_domain_flush_all(domain);
|
||||
}
|
||||
|
||||
int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
|
||||
int status, int tag)
|
||||
int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
|
||||
{
|
||||
struct iommu_dev_data *dev_data;
|
||||
struct amd_iommu *iommu;
|
||||
struct iommu_cmd cmd;
|
||||
|
||||
dev_data = dev_iommu_priv_get(&pdev->dev);
|
||||
iommu = get_amd_iommu_from_dev(&pdev->dev);
|
||||
dev_data = dev_iommu_priv_get(dev);
|
||||
iommu = get_amd_iommu_from_dev(dev);
|
||||
|
||||
build_complete_ppr(&cmd, dev_data->devid, pasid, status,
|
||||
tag, dev_data->pri_tlp);
|
||||
@ -1728,7 +1687,7 @@ static void free_gcr3_tbl_level1(u64 *tbl)
|
||||
|
||||
ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);
|
||||
|
||||
free_page((unsigned long)ptr);
|
||||
iommu_free_page(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1761,7 +1720,7 @@ static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
|
||||
/* Free per device domain ID */
|
||||
domain_id_free(gcr3_info->domid);
|
||||
|
||||
free_page((unsigned long)gcr3_info->gcr3_tbl);
|
||||
iommu_free_page(gcr3_info->gcr3_tbl);
|
||||
gcr3_info->gcr3_tbl = NULL;
|
||||
}
|
||||
|
||||
@ -1796,7 +1755,7 @@ static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
|
||||
/* Allocate per device domain ID */
|
||||
gcr3_info->domid = domain_id_alloc();
|
||||
|
||||
gcr3_info->gcr3_tbl = alloc_pgtable_page(nid, GFP_ATOMIC);
|
||||
gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
|
||||
if (gcr3_info->gcr3_tbl == NULL) {
|
||||
domain_id_free(gcr3_info->domid);
|
||||
return -ENOMEM;
|
||||
@@ -2002,10 +1961,78 @@ static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
	amd_iommu_apply_erratum_63(iommu, devid);
}

/* Update and flush DTE for the given device */
void amd_iommu_dev_update_dte(struct iommu_dev_data *dev_data, bool set)
{
	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);

	if (set)
		set_dte_entry(iommu, dev_data);
	else
		clear_dte_entry(iommu, dev_data->devid);

	clone_aliases(iommu, dev_data->dev);
	device_flush_dte(dev_data);
	iommu_completion_wait(iommu);
}

/*
 * If domain is SVA capable then initialize GCR3 table. Also if domain is
 * in v2 page table mode then update GCR3[0].
 */
static int init_gcr3_table(struct iommu_dev_data *dev_data,
			   struct protection_domain *pdom)
{
	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
	int max_pasids = dev_data->max_pasids;
	int ret = 0;

	/*
	 * If domain is in pt mode then setup GCR3 table only if device
	 * is PASID capable
	 */
	if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data))
		return ret;

	/*
	 * By default, setup GCR3 table to support MAX PASIDs
	 * supported by the device/IOMMU.
	 */
	ret = setup_gcr3_table(&dev_data->gcr3_info, iommu,
			       max_pasids > 0 ? max_pasids : 1);
	if (ret)
		return ret;

	/* Setup GCR3[0] only if domain is setup with v2 page table mode */
	if (!pdom_is_v2_pgtbl_mode(pdom))
		return ret;

	ret = update_gcr3(dev_data, 0, iommu_virt_to_phys(pdom->iop.pgd), true);
	if (ret)
		free_gcr3_table(&dev_data->gcr3_info);

	return ret;
}

static void destroy_gcr3_table(struct iommu_dev_data *dev_data,
			       struct protection_domain *pdom)
{
	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;

	if (pdom_is_v2_pgtbl_mode(pdom))
		update_gcr3(dev_data, 0, 0, false);

	if (gcr3_info->gcr3_tbl == NULL)
		return;

	free_gcr3_table(gcr3_info);
}

static int do_attach(struct iommu_dev_data *dev_data,
		     struct protection_domain *domain)
{
	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
	struct pci_dev *pdev;
	int ret = 0;

	/* Update data structures */
@@ -2020,26 +2047,29 @@ static int do_attach(struct iommu_dev_data *dev_data,
	domain->dev_iommu[iommu->index] += 1;
	domain->dev_cnt += 1;

	/* Init GCR3 table and update device table */
	if (domain->pd_mode == PD_MODE_V2) {
		/* By default, setup GCR3 table to support single PASID */
		ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, 1);
	pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
	if (pdom_is_sva_capable(domain)) {
		ret = init_gcr3_table(dev_data, domain);
		if (ret)
			return ret;

		ret = update_gcr3(dev_data, 0,
				  iommu_virt_to_phys(domain->iop.pgd), true);
		if (ret) {
			free_gcr3_table(&dev_data->gcr3_info);
			return ret;
		if (pdev) {
			pdev_enable_caps(pdev);

			/*
			 * Device can continue to function even if IOPF
			 * enablement failed. Hence in error path just
			 * disable device PRI support.
			 */
			if (amd_iommu_iopf_add_device(iommu, dev_data))
				pdev_disable_cap_pri(pdev);
		}
	} else if (pdev) {
		pdev_enable_cap_ats(pdev);
	}

	/* Update device table */
	set_dte_entry(iommu, dev_data);
	clone_aliases(iommu, dev_data->dev);

	device_flush_dte(dev_data);
	amd_iommu_dev_update_dte(dev_data, true);

	return ret;
}
@@ -2050,19 +2080,15 @@ static void do_detach(struct iommu_dev_data *dev_data)
	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);

	/* Clear GCR3 table */
	if (domain->pd_mode == PD_MODE_V2) {
		update_gcr3(dev_data, 0, 0, false);
		free_gcr3_table(&dev_data->gcr3_info);
	}
	if (pdom_is_sva_capable(domain))
		destroy_gcr3_table(dev_data, domain);

	/* Update data structures */
	dev_data->domain = NULL;
	list_del(&dev_data->list);
	clear_dte_entry(iommu, dev_data->devid);
	clone_aliases(iommu, dev_data->dev);

	/* Flush the DTE entry */
	device_flush_dte(dev_data);
	/* Clear DTE and flush the entry */
	amd_iommu_dev_update_dte(dev_data, false);

	/* Flush IOTLB and wait for the flushes to finish */
	amd_iommu_domain_flush_all(domain);
@@ -2094,9 +2120,6 @@ static int attach_device(struct device *dev,
		goto out;
	}

	if (dev_is_pci(dev))
		pdev_enable_caps(to_pci_dev(dev));

	ret = do_attach(dev_data, domain);

out:
@@ -2112,12 +2135,11 @@ static int attach_device(struct device *dev,
 */
static void detach_device(struct device *dev)
{
	struct protection_domain *domain;
	struct iommu_dev_data *dev_data;
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
	struct protection_domain *domain = dev_data->domain;
	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
	unsigned long flags;

	dev_data = dev_iommu_priv_get(dev);
	domain = dev_data->domain;
	bool ppr = dev_data->ppr;

	spin_lock_irqsave(&domain->lock, flags);

@@ -2132,8 +2154,19 @@ static void detach_device(struct device *dev)
	if (WARN_ON(!dev_data->domain))
		goto out;

	if (ppr) {
		iopf_queue_flush_dev(dev);

		/* Updated here so that it gets reflected in DTE */
		dev_data->ppr = false;
	}

	do_detach(dev_data);

	/* Remove IOPF handler */
	if (ppr)
		amd_iommu_iopf_remove_device(iommu, dev_data);

	if (dev_is_pci(dev))
		pdev_disable_caps(to_pci_dev(dev));

@@ -2147,6 +2180,7 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
{
	struct iommu_device *iommu_dev;
	struct amd_iommu *iommu;
	struct iommu_dev_data *dev_data;
	int ret;

	if (!check_device(dev))
@@ -2173,18 +2207,22 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
		iommu_dev = &iommu->iommu;
	}

	/*
	 * If IOMMU and device supports PASID then it will contain max
	 * supported PASIDs, else it will be zero.
	 */
	dev_data = dev_iommu_priv_get(dev);
	if (amd_iommu_pasid_supported() && dev_is_pci(dev) &&
	    pdev_pasid_supported(dev_data)) {
		dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids,
					     pci_max_pasids(to_pci_dev(dev)));
	}

	iommu_completion_wait(iommu);

	return iommu_dev;
}

static void amd_iommu_probe_finalize(struct device *dev)
{
	/* Domains are initialized for this device - have a look what we ended up with */
	set_dma_ops(dev, NULL);
	iommu_setup_dma_ops(dev, 0, U64_MAX);
}

static void amd_iommu_release_device(struct device *dev)
{
	struct amd_iommu *iommu;
@@ -2236,7 +2274,7 @@ static void cleanup_domain(struct protection_domain *domain)
	WARN_ON(domain->dev_cnt != 0);
}

static void protection_domain_free(struct protection_domain *domain)
void protection_domain_free(struct protection_domain *domain)
{
	if (!domain)
		return;
@@ -2245,7 +2283,7 @@ static void protection_domain_free(struct protection_domain *domain)
		free_io_pgtable_ops(&domain->iop.iop.ops);

	if (domain->iop.root)
		free_page((unsigned long)domain->iop.root);
		iommu_free_page(domain->iop.root);

	if (domain->id)
		domain_id_free(domain->id);
@@ -2260,7 +2298,7 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
	BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);

	if (mode != PAGE_MODE_NONE) {
		pt_root = (void *)get_zeroed_page(GFP_KERNEL);
		pt_root = iommu_alloc_page(GFP_KERNEL);
		if (!pt_root)
			return -ENOMEM;
	}
@@ -2279,7 +2317,7 @@ static int protection_domain_init_v2(struct protection_domain *pdom)
	return 0;
}

static struct protection_domain *protection_domain_alloc(unsigned int type)
struct protection_domain *protection_domain_alloc(unsigned int type)
{
	struct io_pgtable_ops *pgtbl_ops;
	struct protection_domain *domain;
@@ -2296,11 +2334,13 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)

	spin_lock_init(&domain->lock);
	INIT_LIST_HEAD(&domain->dev_list);
	INIT_LIST_HEAD(&domain->dev_data_list);
	domain->nid = NUMA_NO_NODE;

	switch (type) {
	/* No need to allocate io pgtable ops in passthrough mode */
	case IOMMU_DOMAIN_IDENTITY:
	case IOMMU_DOMAIN_SVA:
		return domain;
	case IOMMU_DOMAIN_DMA:
		pgtable = amd_iommu_pgtable;
@@ -2420,7 +2460,7 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
	return do_iommu_domain_alloc(type, dev, flags);
}

static void amd_iommu_domain_free(struct iommu_domain *dom)
void amd_iommu_domain_free(struct iommu_domain *dom)
{
	struct protection_domain *domain;
	unsigned long flags;
@@ -2785,18 +2825,54 @@ static const struct iommu_dirty_ops amd_dirty_ops = {
	.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
};

static int amd_iommu_dev_enable_feature(struct device *dev,
					enum iommu_dev_features feat)
{
	int ret = 0;

	switch (feat) {
	case IOMMU_DEV_FEAT_IOPF:
	case IOMMU_DEV_FEAT_SVA:
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int amd_iommu_dev_disable_feature(struct device *dev,
					 enum iommu_dev_features feat)
{
	int ret = 0;

	switch (feat) {
	case IOMMU_DEV_FEAT_IOPF:
	case IOMMU_DEV_FEAT_SVA:
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

const struct iommu_ops amd_iommu_ops = {
	.capable = amd_iommu_capable,
	.domain_alloc = amd_iommu_domain_alloc,
	.domain_alloc_user = amd_iommu_domain_alloc_user,
	.domain_alloc_sva = amd_iommu_domain_alloc_sva,
	.probe_device = amd_iommu_probe_device,
	.release_device = amd_iommu_release_device,
	.probe_finalize = amd_iommu_probe_finalize,
	.device_group = amd_iommu_device_group,
	.get_resv_regions = amd_iommu_get_resv_regions,
	.is_attach_deferred = amd_iommu_is_attach_deferred,
	.pgsize_bitmap = AMD_IOMMU_PGSIZES,
	.def_domain_type = amd_iommu_def_domain_type,
	.dev_enable_feat = amd_iommu_dev_enable_feature,
	.dev_disable_feat = amd_iommu_dev_disable_feature,
	.remove_dev_pasid = amd_iommu_remove_dev_pasid,
	.page_response = amd_iommu_page_response,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = amd_iommu_attach_device,
		.map_pages = amd_iommu_map_pages,
drivers/iommu/amd/pasid.c (new file, 198 lines)
@@ -0,0 +1,198 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 Advanced Micro Devices, Inc.
 */

#define pr_fmt(fmt) "AMD-Vi: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)

#include <linux/iommu.h>
#include <linux/mm_types.h>

#include "amd_iommu.h"

static inline bool is_pasid_enabled(struct iommu_dev_data *dev_data)
{
	if (dev_data->pasid_enabled && dev_data->max_pasids &&
	    dev_data->gcr3_info.gcr3_tbl != NULL)
		return true;

	return false;
}

static inline bool is_pasid_valid(struct iommu_dev_data *dev_data,
				  ioasid_t pasid)
{
	if (pasid > 0 && pasid < dev_data->max_pasids)
		return true;

	return false;
}

static void remove_dev_pasid(struct pdom_dev_data *pdom_dev_data)
{
	/* Update GCR3 table and flush IOTLB */
	amd_iommu_clear_gcr3(pdom_dev_data->dev_data, pdom_dev_data->pasid);

	list_del(&pdom_dev_data->list);
	kfree(pdom_dev_data);
}

/* Clear PASID from device GCR3 table and remove pdom_dev_data from list */
static void remove_pdom_dev_pasid(struct protection_domain *pdom,
				  struct device *dev, ioasid_t pasid)
{
	struct pdom_dev_data *pdom_dev_data;
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);

	lockdep_assert_held(&pdom->lock);

	for_each_pdom_dev_data(pdom_dev_data, pdom) {
		if (pdom_dev_data->dev_data == dev_data &&
		    pdom_dev_data->pasid == pasid) {
			remove_dev_pasid(pdom_dev_data);
			break;
		}
	}
}

static void sva_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
					       struct mm_struct *mm,
					       unsigned long start, unsigned long end)
{
	struct pdom_dev_data *pdom_dev_data;
	struct protection_domain *sva_pdom;
	unsigned long flags;

	sva_pdom = container_of(mn, struct protection_domain, mn);

	spin_lock_irqsave(&sva_pdom->lock, flags);

	for_each_pdom_dev_data(pdom_dev_data, sva_pdom) {
		amd_iommu_dev_flush_pasid_pages(pdom_dev_data->dev_data,
						pdom_dev_data->pasid,
						start, end - start);
	}

	spin_unlock_irqrestore(&sva_pdom->lock, flags);
}

static void sva_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pdom_dev_data *pdom_dev_data, *next;
	struct protection_domain *sva_pdom;
	unsigned long flags;

	sva_pdom = container_of(mn, struct protection_domain, mn);

	spin_lock_irqsave(&sva_pdom->lock, flags);

	/* Assume dev_data_list contains same PASID with different devices */
	for_each_pdom_dev_data_safe(pdom_dev_data, next, sva_pdom)
		remove_dev_pasid(pdom_dev_data);

	spin_unlock_irqrestore(&sva_pdom->lock, flags);
}

static const struct mmu_notifier_ops sva_mn = {
	.arch_invalidate_secondary_tlbs = sva_arch_invalidate_secondary_tlbs,
	.release = sva_mn_release,
};

int iommu_sva_set_dev_pasid(struct iommu_domain *domain,
			    struct device *dev, ioasid_t pasid)
{
	struct pdom_dev_data *pdom_dev_data;
	struct protection_domain *sva_pdom = to_pdomain(domain);
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
	unsigned long flags;
	int ret = -EINVAL;

	/* PASID zero is used for requests from the I/O device without PASID */
	if (!is_pasid_valid(dev_data, pasid))
		return ret;

	/* Make sure PASID is enabled */
	if (!is_pasid_enabled(dev_data))
		return ret;

	/* Add PASID to protection domain pasid list */
	pdom_dev_data = kzalloc(sizeof(*pdom_dev_data), GFP_KERNEL);
	if (pdom_dev_data == NULL)
		return ret;

	pdom_dev_data->pasid = pasid;
	pdom_dev_data->dev_data = dev_data;

	spin_lock_irqsave(&sva_pdom->lock, flags);

	/* Setup GCR3 table */
	ret = amd_iommu_set_gcr3(dev_data, pasid,
				 iommu_virt_to_phys(domain->mm->pgd));
	if (ret) {
		kfree(pdom_dev_data);
		goto out_unlock;
	}

	list_add(&pdom_dev_data->list, &sva_pdom->dev_data_list);

out_unlock:
	spin_unlock_irqrestore(&sva_pdom->lock, flags);
	return ret;
}

void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
				struct iommu_domain *domain)
{
	struct protection_domain *sva_pdom;
	unsigned long flags;

	if (!is_pasid_valid(dev_iommu_priv_get(dev), pasid))
		return;

	sva_pdom = to_pdomain(domain);

	spin_lock_irqsave(&sva_pdom->lock, flags);

	/* Remove PASID from dev_data_list */
	remove_pdom_dev_pasid(sva_pdom, dev, pasid);

	spin_unlock_irqrestore(&sva_pdom->lock, flags);
}

static void iommu_sva_domain_free(struct iommu_domain *domain)
{
	struct protection_domain *sva_pdom = to_pdomain(domain);

	if (sva_pdom->mn.ops)
		mmu_notifier_unregister(&sva_pdom->mn, domain->mm);

	amd_iommu_domain_free(domain);
}

static const struct iommu_domain_ops amd_sva_domain_ops = {
	.set_dev_pasid = iommu_sva_set_dev_pasid,
	.free = iommu_sva_domain_free
};

struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
						struct mm_struct *mm)
{
	struct protection_domain *pdom;
	int ret;

	pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA);
	if (!pdom)
		return ERR_PTR(-ENOMEM);

	pdom->domain.ops = &amd_sva_domain_ops;
	pdom->mn.ops = &sva_mn;

	ret = mmu_notifier_register(&pdom->mn, mm);
	if (ret) {
		protection_domain_free(pdom);
		return ERR_PTR(ret);
	}

	return &pdom->domain;
}
drivers/iommu/amd/ppr.c (new file, 288 lines)
@@ -0,0 +1,288 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2023 Advanced Micro Devices, Inc.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "AMD-Vi: " fmt
|
||||
#define dev_fmt(fmt) pr_fmt(fmt)
|
||||
|
||||
#include <linux/amd-iommu.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/mmu_notifier.h>
|
||||
|
||||
#include <asm/iommu.h>
|
||||
|
||||
#include "amd_iommu.h"
|
||||
#include "amd_iommu_types.h"
|
||||
|
||||
#include "../iommu-pages.h"
|
||||
|
||||
int __init amd_iommu_alloc_ppr_log(struct amd_iommu *iommu)
|
||||
{
|
||||
iommu->ppr_log = iommu_alloc_4k_pages(iommu, GFP_KERNEL | __GFP_ZERO,
|
||||
PPR_LOG_SIZE);
|
||||
return iommu->ppr_log ? 0 : -ENOMEM;
|
||||
}
|
||||
|
||||
void amd_iommu_enable_ppr_log(struct amd_iommu *iommu)
|
||||
{
|
||||
u64 entry;
|
||||
|
||||
if (iommu->ppr_log == NULL)
|
||||
return;
|
||||
|
||||
iommu_feature_enable(iommu, CONTROL_PPR_EN);
|
||||
|
||||
entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
|
||||
|
||||
memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
|
||||
&entry, sizeof(entry));
|
||||
|
||||
/* set head and tail to zero manually */
|
||||
writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
|
||||
writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
|
||||
|
||||
iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
|
||||
iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
|
||||
}
|
||||
|
||||
void __init amd_iommu_free_ppr_log(struct amd_iommu *iommu)
|
||||
{
|
||||
iommu_free_pages(iommu->ppr_log, get_order(PPR_LOG_SIZE));
|
||||
}
|
||||
|
||||
/*
|
||||
* This function restarts ppr logging in case the IOMMU experienced
|
||||
* PPR log overflow.
|
||||
*/
|
||||
void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
|
||||
{
|
||||
amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
|
||||
CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK,
|
||||
MMIO_STATUS_PPR_OVERFLOW_MASK);
|
||||
}
|
||||
|
||||
static inline u32 ppr_flag_to_fault_perm(u16 flag)
|
||||
{
|
||||
int perm = 0;
|
||||
|
||||
if (flag & PPR_FLAG_READ)
|
||||
perm |= IOMMU_FAULT_PERM_READ;
|
||||
if (flag & PPR_FLAG_WRITE)
|
||||
perm |= IOMMU_FAULT_PERM_WRITE;
|
||||
if (flag & PPR_FLAG_EXEC)
|
||||
perm |= IOMMU_FAULT_PERM_EXEC;
|
||||
if (!(flag & PPR_FLAG_US))
|
||||
perm |= IOMMU_FAULT_PERM_PRIV;
|
||||
|
||||
return perm;
|
||||
}
|
||||
|
||||
static bool ppr_is_valid(struct amd_iommu *iommu, u64 *raw)
|
||||
{
|
||||
struct device *dev = iommu->iommu.dev;
|
||||
u16 devid = PPR_DEVID(raw[0]);
|
||||
|
||||
if (!(PPR_FLAGS(raw[0]) & PPR_FLAG_GN)) {
|
||||
dev_dbg(dev, "PPR logged [Request ignored due to GN=0 (device=%04x:%02x:%02x.%x "
|
||||
"pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx]\n",
|
||||
iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
|
||||
PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0]));
|
||||
return false;
|
||||
}
|
||||
|
||||
if (PPR_FLAGS(raw[0]) & PPR_FLAG_RVSD) {
|
||||
dev_dbg(dev, "PPR logged [Invalid request format (device=%04x:%02x:%02x.%x "
|
||||
"pasid=0x%05llx address=0x%llx flags=0x%04llx tag=0x%03llx]\n",
|
||||
iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
|
||||
PPR_PASID(raw[0]), raw[1], PPR_FLAGS(raw[0]), PPR_TAG(raw[0]));
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void iommu_call_iopf_notifier(struct amd_iommu *iommu, u64 *raw)
|
||||
{
|
||||
struct iommu_dev_data *dev_data;
|
||||
struct iopf_fault event;
|
||||
struct pci_dev *pdev;
|
||||
u16 devid = PPR_DEVID(raw[0]);
|
||||
|
||||
if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
|
||||
pr_info_ratelimited("Unknown PPR request received\n");
|
||||
return;
|
||||
}
|
||||
|
||||
pdev = pci_get_domain_bus_and_slot(iommu->pci_seg->id,
|
||||
PCI_BUS_NUM(devid), devid & 0xff);
|
||||
if (!pdev)
|
||||
return;
|
||||
|
||||
if (!ppr_is_valid(iommu, raw))
|
||||
goto out;
|
||||
|
||||
memset(&event, 0, sizeof(struct iopf_fault));
|
||||
|
||||
event.fault.type = IOMMU_FAULT_PAGE_REQ;
|
||||
event.fault.prm.perm = ppr_flag_to_fault_perm(PPR_FLAGS(raw[0]));
|
||||
event.fault.prm.addr = (u64)(raw[1] & PAGE_MASK);
|
||||
event.fault.prm.pasid = PPR_PASID(raw[0]);
|
||||
event.fault.prm.grpid = PPR_TAG(raw[0]) & 0x1FF;
|
||||
|
||||
/*
|
||||
* PASID zero is used for requests from the I/O device without
|
||||
* a PASID
|
||||
*/
|
||||
dev_data = dev_iommu_priv_get(&pdev->dev);
|
||||
if (event.fault.prm.pasid == 0 ||
|
||||
event.fault.prm.pasid >= dev_data->max_pasids) {
|
||||
pr_info_ratelimited("Invalid PASID : 0x%x, device : 0x%x\n",
|
||||
event.fault.prm.pasid, pdev->dev.id);
|
||||
goto out;
|
||||
}
|
||||
|
||||
event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
|
||||
event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
|
||||
if (PPR_TAG(raw[0]) & 0x200)
|
||||
event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
|
||||
|
||||
/* Submit event */
|
||||
iommu_report_device_fault(&pdev->dev, &event);
|
||||
|
||||
return;
|
||||
|
||||
out:
|
||||
/* Nobody cared, abort */
|
||||
amd_iommu_complete_ppr(&pdev->dev, PPR_PASID(raw[0]),
|
||||
IOMMU_PAGE_RESP_FAILURE,
|
||||
PPR_TAG(raw[0]) & 0x1FF);
|
||||
}
|
||||
|
||||
void amd_iommu_poll_ppr_log(struct amd_iommu *iommu)
|
||||
{
|
||||
u32 head, tail;
|
||||
|
||||
if (iommu->ppr_log == NULL)
|
||||
return;
|
||||
|
||||
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
|
||||
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
|
||||
|
||||
while (head != tail) {
|
||||
volatile u64 *raw;
|
||||
u64 entry[2];
|
||||
int i;
|
||||
|
||||
raw = (u64 *)(iommu->ppr_log + head);
|
||||
|
||||
/*
|
||||
* Hardware bug: Interrupt may arrive before the entry is
|
||||
* written to memory. If this happens we need to wait for the
|
||||
* entry to arrive.
|
||||
*/
|
||||
for (i = 0; i < LOOP_TIMEOUT; ++i) {
|
||||
if (PPR_REQ_TYPE(raw[0]) != 0)
|
||||
break;
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
/* Avoid memcpy function-call overhead */
|
||||
entry[0] = raw[0];
|
||||
entry[1] = raw[1];
|
||||
|
||||
/*
|
||||
* To detect the hardware errata 733 we need to clear the
|
||||
* entry back to zero. This issue does not exist on SNP
|
||||
* enabled system. Also this buffer is not writeable on
|
||||
* SNP enabled system.
|
||||
*/
|
||||
if (!amd_iommu_snp_en)
|
||||
raw[0] = raw[1] = 0UL;
|
||||
|
||||
/* Update head pointer of hardware ring-buffer */
|
||||
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
|
||||
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
|
||||
|
||||
/* Handle PPR entry */
|
||||
iommu_call_iopf_notifier(iommu, entry);
|
||||
}
|
||||
}
|
||||
|
||||
/**************************************************************
|
||||
*
|
||||
* IOPF handling stuff
|
||||
*/
|
||||
|
||||
/* Setup per-IOMMU IOPF queue if not exist. */
|
||||
int amd_iommu_iopf_init(struct amd_iommu *iommu)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (iommu->iopf_queue)
|
||||
return ret;
|
||||
|
||||
snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
|
||||
"amdiommu-%#x-iopfq",
|
||||
PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid));
|
||||
|
||||
iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name);
|
||||
if (!iommu->iopf_queue)
|
||||
ret = -ENOMEM;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Destroy per-IOMMU IOPF queue if no longer needed. */
|
||||
void amd_iommu_iopf_uninit(struct amd_iommu *iommu)
|
||||
{
|
||||
iopf_queue_free(iommu->iopf_queue);
|
||||
iommu->iopf_queue = NULL;
|
||||
}
|
||||
|
||||
void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
|
||||
struct iommu_page_response *resp)
|
||||
{
|
||||
amd_iommu_complete_ppr(dev, resp->pasid, resp->code, resp->grpid);
|
||||
}
|
||||
|
||||
int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
|
||||
struct iommu_dev_data *dev_data)
|
||||
{
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
if (!dev_data->pri_enabled)
|
||||
return ret;
|
||||
|
||||
raw_spin_lock_irqsave(&iommu->lock, flags);
|
||||
|
||||
if (!iommu->iopf_queue) {
|
||||
ret = -EINVAL;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
dev_data->ppr = true;
|
||||
|
||||
out_unlock:
|
||||
raw_spin_unlock_irqrestore(&iommu->lock, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Its assumed that caller has verified that device was added to iopf queue */
|
||||
void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
|
||||
struct iommu_dev_data *dev_data)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
raw_spin_lock_irqsave(&iommu->lock, flags);
|
||||
|
||||
iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev);
|
||||
dev_data->ppr = false;
|
||||
|
||||
raw_spin_unlock_irqrestore(&iommu->lock, flags);
|
||||
}
|
@ -3,3 +3,5 @@ obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o
|
||||
arm_smmu_v3-objs-y += arm-smmu-v3.o
|
||||
arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
|
||||
arm_smmu_v3-objs := $(arm_smmu_v3-objs-y)
|
||||
|
||||
obj-$(CONFIG_ARM_SMMU_V3_KUNIT_TEST) += arm-smmu-v3-test.o
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <linux/mmu_notifier.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <kunit/visibility.h>
|
||||
|
||||
#include "arm-smmu-v3.h"
|
||||
#include "../../io-pgtable-arm.h"
|
||||
@ -34,21 +35,25 @@ struct arm_smmu_bond {
|
||||
|
||||
static DEFINE_MUTEX(sva_lock);
|
||||
|
||||
/*
|
||||
* Write the CD to the CD tables for all masters that this domain is attached
|
||||
* to. Note that this is only used to update existing CD entries in the target
|
||||
* CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
|
||||
*/
|
||||
static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
|
||||
int ssid,
|
||||
struct arm_smmu_ctx_desc *cd)
|
||||
static void
|
||||
arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
|
||||
{
|
||||
struct arm_smmu_master *master;
|
||||
struct arm_smmu_cd target_cd;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
|
||||
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
|
||||
arm_smmu_write_ctx_desc(master, ssid, cd);
|
||||
struct arm_smmu_cd *cdptr;
|
||||
|
||||
/* S1 domains only support RID attachment right now */
|
||||
cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
|
||||
if (WARN_ON(!cdptr))
|
||||
continue;
|
||||
|
||||
arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
|
||||
arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
|
||||
&target_cd);
|
||||
}
|
||||
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
|
||||
}
|
||||
@ -96,7 +101,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
|
||||
* be some overlap between use of both ASIDs, until we invalidate the
|
||||
* TLB.
|
||||
*/
|
||||
arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);
|
||||
arm_smmu_update_s1_domain_cd_entry(smmu_domain);
|
||||
|
||||
/* Invalidate TLB entries previously associated with that context */
|
||||
arm_smmu_tlb_inv_asid(smmu, asid);
|
||||
@ -105,11 +110,87 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static u64 page_size_to_cd(void)
|
||||
{
|
||||
static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K ||
|
||||
PAGE_SIZE == SZ_64K);
|
||||
if (PAGE_SIZE == SZ_64K)
|
||||
return ARM_LPAE_TCR_TG0_64K;
|
||||
if (PAGE_SIZE == SZ_16K)
|
||||
return ARM_LPAE_TCR_TG0_16K;
|
||||
return ARM_LPAE_TCR_TG0_4K;
|
||||
}
|
||||
|
||||
VISIBLE_IF_KUNIT
|
||||
void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
|
||||
struct arm_smmu_master *master, struct mm_struct *mm,
|
||||
u16 asid)
|
||||
{
|
||||
u64 par;
|
||||
|
||||
memset(target, 0, sizeof(*target));
|
||||
|
||||
par = cpuid_feature_extract_unsigned_field(
|
||||
read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1),
|
||||
ID_AA64MMFR0_EL1_PARANGE_SHIFT);
|
||||
|
||||
target->data[0] = cpu_to_le64(
|
||||
CTXDESC_CD_0_TCR_EPD1 |
|
||||
#ifdef __BIG_ENDIAN
|
||||
CTXDESC_CD_0_ENDI |
|
||||
#endif
|
||||
CTXDESC_CD_0_V |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par) |
|
||||
CTXDESC_CD_0_AA64 |
|
||||
(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
|
||||
CTXDESC_CD_0_R |
|
||||
CTXDESC_CD_0_A |
|
||||
CTXDESC_CD_0_ASET |
|
||||
FIELD_PREP(CTXDESC_CD_0_ASID, asid));
|
||||
|
||||
/*
|
||||
* If no MM is passed then this creates a SVA entry that faults
|
||||
* everything. arm_smmu_write_cd_entry() can hitlessly go between these
|
||||
* two entries types since TTB0 is ignored by HW when EPD0 is set.
|
||||
*/
|
||||
if (mm) {
|
||||
target->data[0] |= cpu_to_le64(
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ,
|
||||
64ULL - vabits_actual) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_TG0, page_size_to_cd()) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0,
|
||||
ARM_LPAE_TCR_RGN_WBWA) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0,
|
||||
ARM_LPAE_TCR_RGN_WBWA) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS));
|
||||
|
||||
target->data[1] = cpu_to_le64(virt_to_phys(mm->pgd) &
|
||||
CTXDESC_CD_1_TTB0_MASK);
|
||||
} else {
|
||||
target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_EPD0);
|
||||
|
||||
/*
|
||||
* Disable stall and immediately generate an abort if stall
|
||||
* disable is permitted. This speeds up cleanup for an unclean
|
||||
* exit if the device is still doing a lot of DMA.
|
||||
*/
|
||||
if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
|
||||
target->data[0] &=
|
||||
cpu_to_le64(~(CTXDESC_CD_0_S | CTXDESC_CD_0_R));
|
||||
}
|
||||
|
||||
/*
|
||||
* MAIR value is pretty much constant and global, so we can just get it
|
||||
* from the current CPU register
|
||||
*/
|
||||
target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
|
||||
}
|
||||
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd);
|
||||
|
||||
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
|
||||
{
|
||||
u16 asid;
|
||||
int err = 0;
|
||||
u64 tcr, par, reg;
|
||||
struct arm_smmu_ctx_desc *cd;
|
||||
struct arm_smmu_ctx_desc *ret = NULL;
|
||||
|
||||
@ -143,39 +224,6 @@ static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
|
||||
if (err)
|
||||
goto out_free_asid;
|
||||
|
||||
tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, ARM_LPAE_TCR_RGN_WBWA) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, ARM_LPAE_TCR_RGN_WBWA) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_SH0, ARM_LPAE_TCR_SH_IS) |
|
||||
CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
|
||||
|
||||
switch (PAGE_SIZE) {
|
||||
case SZ_4K:
|
||||
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_4K);
|
||||
break;
|
||||
case SZ_16K:
|
||||
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_16K);
|
||||
break;
|
||||
case SZ_64K:
|
||||
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_TG0, ARM_LPAE_TCR_TG0_64K);
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
err = -EINVAL;
|
||||
goto out_free_asid;
|
||||
}
|
||||
|
||||
reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
|
||||
par = cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR0_EL1_PARANGE_SHIFT);
|
||||
tcr |= FIELD_PREP(CTXDESC_CD_0_TCR_IPS, par);
|
||||
|
||||
cd->ttbr = virt_to_phys(mm->pgd);
|
||||
cd->tcr = tcr;
|
||||
/*
|
||||
* MAIR value is pretty much constant and global, so we can just get it
|
||||
* from the current CPU register
|
||||
*/
|
||||
cd->mair = read_sysreg(mair_el1);
|
||||
cd->asid = asid;
|
||||
cd->mm = mm;
|
||||
|
||||
@ -253,6 +301,8 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
|
||||
{
|
||||
struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
|
||||
struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
|
||||
struct arm_smmu_master *master;
|
||||
unsigned long flags;
|
||||
|
||||
mutex_lock(&sva_lock);
|
||||
if (smmu_mn->cleared) {
|
||||
@ -264,8 +314,19 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
|
||||
* DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
|
||||
* but disable translation.
|
||||
*/
|
||||
arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
|
||||
&quiet_cd);
|
||||
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
|
||||
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
|
||||
struct arm_smmu_cd target;
|
||||
struct arm_smmu_cd *cdptr;
|
||||
|
||||
cdptr = arm_smmu_get_cd_ptr(master, mm_get_enqcmd_pasid(mm));
|
||||
if (WARN_ON(!cdptr))
|
||||
continue;
|
||||
arm_smmu_make_sva_cd(&target, master, NULL, smmu_mn->cd->asid);
|
||||
arm_smmu_write_cd_entry(master, mm_get_enqcmd_pasid(mm), cdptr,
|
||||
&target);
|
||||
}
|
||||
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
|
||||
|
||||
arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
|
||||
arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
|
||||
@ -360,6 +421,8 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
|
||||
struct mm_struct *mm)
|
||||
{
|
||||
int ret;
|
||||
struct arm_smmu_cd target;
|
||||
struct arm_smmu_cd *cdptr;
|
||||
struct arm_smmu_bond *bond;
|
||||
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
|
||||
@ -386,9 +449,13 @@ static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
|
||||
goto err_free_bond;
|
||||
}
|
||||
|
||||
ret = arm_smmu_write_ctx_desc(master, pasid, bond->smmu_mn->cd);
|
||||
if (ret)
|
||||
cdptr = arm_smmu_alloc_cd_ptr(master, mm_get_enqcmd_pasid(mm));
|
||||
if (!cdptr) {
|
||||
ret = -ENOMEM;
|
||||
goto err_put_notifier;
|
||||
}
|
||||
arm_smmu_make_sva_cd(&target, master, mm, bond->smmu_mn->cd->asid);
|
||||
arm_smmu_write_cd_entry(master, pasid, cdptr, &target);
|
||||
|
||||
list_add(&bond->list, &master->bonds);
|
||||
return 0;
|
||||
@ -546,7 +613,7 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
|
||||
|
||||
mutex_lock(&sva_lock);
|
||||
|
||||
arm_smmu_write_ctx_desc(master, id, NULL);
|
||||
arm_smmu_clear_cd(master, id);
|
||||
|
||||
list_for_each_entry(t, &master->bonds, list) {
|
||||
if (t->mm == mm) {
|
||||
@ -569,6 +636,9 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
|
||||
int ret = 0;
|
||||
struct mm_struct *mm = domain->mm;
|
||||
|
||||
if (mm_get_enqcmd_pasid(mm) != id)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&sva_lock);
|
||||
ret = __arm_smmu_sva_bind(dev, id, mm);
|
||||
mutex_unlock(&sva_lock);
|
||||
|
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-test.c (new file, 468 lines)
@@ -0,0 +1,468 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright 2024 Google LLC.
|
||||
*/
|
||||
#include <kunit/test.h>
|
||||
#include <linux/io-pgtable.h>
|
||||
|
||||
#include "arm-smmu-v3.h"
|
||||
|
||||
struct arm_smmu_test_writer {
|
||||
struct arm_smmu_entry_writer writer;
|
||||
struct kunit *test;
|
||||
const __le64 *init_entry;
|
||||
const __le64 *target_entry;
|
||||
__le64 *entry;
|
||||
|
||||
bool invalid_entry_written;
|
||||
unsigned int num_syncs;
|
||||
};
|
||||
|
||||
#define NUM_ENTRY_QWORDS 8
|
||||
#define NUM_EXPECTED_SYNCS(x) x
|
||||
|
||||
static struct arm_smmu_ste bypass_ste;
|
||||
static struct arm_smmu_ste abort_ste;
|
||||
static struct arm_smmu_device smmu = {
|
||||
.features = ARM_SMMU_FEAT_STALLS | ARM_SMMU_FEAT_ATTR_TYPES_OVR
|
||||
};
|
||||
static struct mm_struct sva_mm = {
|
||||
.pgd = (void *)0xdaedbeefdeadbeefULL,
|
||||
};
|
||||
|
||||
static bool arm_smmu_entry_differs_in_used_bits(const __le64 *entry,
|
||||
const __le64 *used_bits,
|
||||
const __le64 *target,
|
||||
unsigned int length)
|
||||
{
|
||||
bool differs = false;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < length; i++) {
|
||||
if ((entry[i] & used_bits[i]) != target[i])
|
||||
differs = true;
|
||||
}
|
||||
return differs;
|
||||
}
|
||||
|
||||
static void
|
||||
arm_smmu_test_writer_record_syncs(struct arm_smmu_entry_writer *writer)
|
||||
{
|
||||
struct arm_smmu_test_writer *test_writer =
|
||||
container_of(writer, struct arm_smmu_test_writer, writer);
|
||||
__le64 *entry_used_bits;
|
||||
|
||||
entry_used_bits = kunit_kzalloc(
|
||||
test_writer->test, sizeof(*entry_used_bits) * NUM_ENTRY_QWORDS,
|
||||
GFP_KERNEL);
|
||||
KUNIT_ASSERT_NOT_NULL(test_writer->test, entry_used_bits);
|
||||
|
||||
pr_debug("STE value is now set to: ");
|
||||
print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8,
|
||||
test_writer->entry,
|
||||
NUM_ENTRY_QWORDS * sizeof(*test_writer->entry),
|
||||
false);
|
||||
|
||||
test_writer->num_syncs += 1;
|
||||
if (!test_writer->entry[0]) {
|
||||
test_writer->invalid_entry_written = true;
|
||||
} else {
|
||||
/*
|
||||
* At any stage in a hitless transition, the entry must be
|
||||
* equivalent to either the initial entry or the target entry
|
||||
* when only considering the bits used by the current
|
||||
* configuration.
|
||||
*/
|
||||
writer->ops->get_used(test_writer->entry, entry_used_bits);
|
||||
KUNIT_EXPECT_FALSE(
|
||||
test_writer->test,
|
||||
arm_smmu_entry_differs_in_used_bits(
|
||||
test_writer->entry, entry_used_bits,
|
||||
test_writer->init_entry, NUM_ENTRY_QWORDS) &&
|
||||
arm_smmu_entry_differs_in_used_bits(
|
||||
test_writer->entry, entry_used_bits,
|
||||
test_writer->target_entry,
|
||||
NUM_ENTRY_QWORDS));
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
arm_smmu_v3_test_debug_print_used_bits(struct arm_smmu_entry_writer *writer,
|
||||
const __le64 *ste)
|
||||
{
|
||||
__le64 used_bits[NUM_ENTRY_QWORDS] = {};
|
||||
|
||||
arm_smmu_get_ste_used(ste, used_bits);
|
||||
pr_debug("STE used bits: ");
|
||||
print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, used_bits,
|
||||
sizeof(used_bits), false);
|
||||
}
|
||||
|
||||
static const struct arm_smmu_entry_writer_ops test_ste_ops = {
|
||||
.sync = arm_smmu_test_writer_record_syncs,
|
||||
.get_used = arm_smmu_get_ste_used,
|
||||
};
|
||||
|
||||
static const struct arm_smmu_entry_writer_ops test_cd_ops = {
|
||||
.sync = arm_smmu_test_writer_record_syncs,
|
||||
.get_used = arm_smmu_get_cd_used,
|
||||
};
|
||||
|
||||
static void arm_smmu_v3_test_ste_expect_transition(
|
||||
struct kunit *test, const struct arm_smmu_ste *cur,
|
||||
const struct arm_smmu_ste *target, unsigned int num_syncs_expected,
|
||||
bool hitless)
|
||||
{
|
||||
struct arm_smmu_ste cur_copy = *cur;
|
||||
struct arm_smmu_test_writer test_writer = {
|
||||
.writer = {
|
||||
.ops = &test_ste_ops,
|
||||
},
|
||||
.test = test,
|
||||
.init_entry = cur->data,
|
||||
.target_entry = target->data,
|
||||
.entry = cur_copy.data,
|
||||
.num_syncs = 0,
|
||||
.invalid_entry_written = false,
|
||||
|
||||
};
|
||||
|
||||
pr_debug("STE initial value: ");
|
||||
print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
|
||||
sizeof(cur_copy), false);
|
||||
arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
|
||||
pr_debug("STE target value: ");
|
||||
print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, target->data,
|
||||
sizeof(cur_copy), false);
|
||||
arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
|
||||
target->data);
|
||||
|
||||
arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);
|
||||
|
||||
KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
|
||||
KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
|
||||
KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_test_ste_expect_hitless_transition(
|
||||
struct kunit *test, const struct arm_smmu_ste *cur,
|
||||
const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
|
||||
{
|
||||
arm_smmu_v3_test_ste_expect_transition(test, cur, target,
|
||||
num_syncs_expected, true);
|
||||
}
|
||||
|
||||
static const dma_addr_t fake_cdtab_dma_addr = 0xF0F0F0F0F0F0;
|
||||
|
||||
static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste,
|
||||
const dma_addr_t dma_addr)
|
||||
{
|
||||
struct arm_smmu_master master = {
|
||||
.cd_table.cdtab_dma = dma_addr,
|
||||
.cd_table.s1cdmax = 0xFF,
|
||||
.cd_table.s1fmt = STRTAB_STE_0_S1FMT_64K_L2,
|
||||
.smmu = &smmu,
|
||||
};
|
||||
|
||||
arm_smmu_make_cdtable_ste(ste, &master);
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_ste_test_bypass_to_abort(struct kunit *test)
|
||||
{
|
||||
/*
|
||||
* Bypass STEs has used bits in the first two Qwords, while abort STEs
|
||||
* only have used bits in the first QWord. Transitioning from bypass to
|
||||
* abort requires two syncs: the first to set the first qword and make
|
||||
* the STE into an abort, the second to clean up the second qword.
|
||||
*/
|
||||
arm_smmu_v3_test_ste_expect_hitless_transition(
|
||||
test, &bypass_ste, &abort_ste, NUM_EXPECTED_SYNCS(2));
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_ste_test_abort_to_bypass(struct kunit *test)
|
||||
{
|
||||
/*
|
||||
* Transitioning from abort to bypass also requires two syncs: the first
|
||||
* to set the second qword data required by the bypass STE, and the
|
||||
* second to set the first qword and switch to bypass.
|
||||
*/
|
||||
arm_smmu_v3_test_ste_expect_hitless_transition(
|
||||
test, &abort_ste, &bypass_ste, NUM_EXPECTED_SYNCS(2));
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_ste_test_cdtable_to_abort(struct kunit *test)
|
||||
{
|
||||
struct arm_smmu_ste ste;
|
||||
|
||||
arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
|
||||
arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
|
||||
NUM_EXPECTED_SYNCS(2));
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_ste_test_abort_to_cdtable(struct kunit *test)
|
||||
{
|
||||
struct arm_smmu_ste ste;
|
||||
|
||||
arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
|
||||
arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
|
||||
NUM_EXPECTED_SYNCS(2));
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_ste_test_cdtable_to_bypass(struct kunit *test)
|
||||
{
|
||||
struct arm_smmu_ste ste;
|
||||
|
||||
arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
|
||||
arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
|
||||
NUM_EXPECTED_SYNCS(3));
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_ste_test_bypass_to_cdtable(struct kunit *test)
|
||||
{
|
||||
struct arm_smmu_ste ste;
|
||||
|
||||
arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
|
||||
arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
|
||||
NUM_EXPECTED_SYNCS(3));
|
||||
}
|
||||
|
||||
static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste,
|
||||
bool ats_enabled)
|
||||
{
|
||||
struct arm_smmu_master master = {
|
||||
.smmu = &smmu,
|
||||
.ats_enabled = ats_enabled,
|
||||
};
|
||||
struct io_pgtable io_pgtable = {};
|
||||
struct arm_smmu_domain smmu_domain = {
|
||||
.pgtbl_ops = &io_pgtable.ops,
|
||||
};
|
||||
|
||||
io_pgtable.cfg.arm_lpae_s2_cfg.vttbr = 0xdaedbeefdeadbeefULL;
|
||||
io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.ps = 1;
|
||||
io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tg = 2;
|
||||
io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sh = 3;
|
||||
io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.orgn = 1;
|
||||
io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.irgn = 2;
|
||||
io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sl = 3;
|
||||
io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tsz = 4;
|
||||
|
||||
arm_smmu_make_s2_domain_ste(ste, &master, &smmu_domain);
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_ste_test_s2_to_abort(struct kunit *test)
|
||||
{
|
||||
struct arm_smmu_ste ste;
|
||||
|
||||
arm_smmu_test_make_s2_ste(&ste, true);
|
||||
arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
|
||||
NUM_EXPECTED_SYNCS(2));
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_ste_test_abort_to_s2(struct kunit *test)
|
||||
{
|
||||
struct arm_smmu_ste ste;
|
||||
|
||||
arm_smmu_test_make_s2_ste(&ste, true);
|
||||
arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
|
||||
NUM_EXPECTED_SYNCS(2));
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_ste_test_s2_to_bypass(struct kunit *test)
|
||||
{
|
||||
struct arm_smmu_ste ste;
|
||||
|
||||
arm_smmu_test_make_s2_ste(&ste, true);
|
||||
arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
|
||||
NUM_EXPECTED_SYNCS(2));
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_ste_test_bypass_to_s2(struct kunit *test)
|
||||
{
|
||||
struct arm_smmu_ste ste;
|
||||
|
||||
arm_smmu_test_make_s2_ste(&ste, true);
|
||||
arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
|
||||
NUM_EXPECTED_SYNCS(2));
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_test_cd_expect_transition(
|
||||
struct kunit *test, const struct arm_smmu_cd *cur,
|
||||
const struct arm_smmu_cd *target, unsigned int num_syncs_expected,
|
||||
bool hitless)
|
||||
{
|
||||
struct arm_smmu_cd cur_copy = *cur;
|
||||
struct arm_smmu_test_writer test_writer = {
|
||||
.writer = {
|
||||
.ops = &test_cd_ops,
|
||||
},
|
||||
.test = test,
|
||||
.init_entry = cur->data,
|
||||
.target_entry = target->data,
|
||||
.entry = cur_copy.data,
|
||||
.num_syncs = 0,
|
||||
.invalid_entry_written = false,
|
||||
|
||||
};
|
||||
|
||||
pr_debug("CD initial value: ");
|
||||
print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, cur_copy.data,
|
||||
sizeof(cur_copy), false);
|
||||
arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer, cur->data);
|
||||
pr_debug("CD target value: ");
|
||||
print_hex_dump_debug(" ", DUMP_PREFIX_NONE, 16, 8, target->data,
|
||||
sizeof(cur_copy), false);
|
||||
arm_smmu_v3_test_debug_print_used_bits(&test_writer.writer,
|
||||
target->data);
|
||||
|
||||
arm_smmu_write_entry(&test_writer.writer, cur_copy.data, target->data);
|
||||
|
||||
KUNIT_EXPECT_EQ(test, test_writer.invalid_entry_written, !hitless);
|
||||
KUNIT_EXPECT_EQ(test, test_writer.num_syncs, num_syncs_expected);
|
||||
KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_test_cd_expect_non_hitless_transition(
|
||||
struct kunit *test, const struct arm_smmu_cd *cur,
|
||||
const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
|
||||
{
|
||||
arm_smmu_v3_test_cd_expect_transition(test, cur, target,
|
||||
num_syncs_expected, false);
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_test_cd_expect_hitless_transition(
|
||||
struct kunit *test, const struct arm_smmu_cd *cur,
|
||||
const struct arm_smmu_cd *target, unsigned int num_syncs_expected)
|
||||
{
|
||||
arm_smmu_v3_test_cd_expect_transition(test, cur, target,
|
||||
num_syncs_expected, true);
|
||||
}
|
||||
|
||||
static void arm_smmu_test_make_s1_cd(struct arm_smmu_cd *cd, unsigned int asid)
|
||||
{
|
||||
struct arm_smmu_master master = {
|
||||
.smmu = &smmu,
|
||||
};
|
||||
struct io_pgtable io_pgtable = {};
|
||||
struct arm_smmu_domain smmu_domain = {
|
||||
.pgtbl_ops = &io_pgtable.ops,
|
||||
.cd = {
|
||||
.asid = asid,
|
||||
},
|
||||
};
|
||||
|
||||
io_pgtable.cfg.arm_lpae_s1_cfg.ttbr = 0xdaedbeefdeadbeefULL;
|
||||
io_pgtable.cfg.arm_lpae_s1_cfg.tcr.ips = 1;
|
||||
io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tg = 2;
|
||||
io_pgtable.cfg.arm_lpae_s1_cfg.tcr.sh = 3;
|
||||
io_pgtable.cfg.arm_lpae_s1_cfg.tcr.orgn = 1;
|
||||
io_pgtable.cfg.arm_lpae_s1_cfg.tcr.irgn = 2;
|
||||
io_pgtable.cfg.arm_lpae_s1_cfg.tcr.tsz = 4;
|
||||
io_pgtable.cfg.arm_lpae_s1_cfg.mair = 0xabcdef012345678ULL;
|
||||
|
||||
arm_smmu_make_s1_cd(cd, &master, &smmu_domain);
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_cd_test_s1_clear(struct kunit *test)
|
||||
{
|
||||
struct arm_smmu_cd cd = {};
|
||||
struct arm_smmu_cd cd_2;
|
||||
|
||||
arm_smmu_test_make_s1_cd(&cd_2, 1997);
|
||||
arm_smmu_v3_test_cd_expect_non_hitless_transition(
|
||||
test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
|
||||
arm_smmu_v3_test_cd_expect_non_hitless_transition(
|
||||
test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_cd_test_s1_change_asid(struct kunit *test)
|
||||
{
|
||||
struct arm_smmu_cd cd = {};
|
||||
struct arm_smmu_cd cd_2;
|
||||
|
||||
arm_smmu_test_make_s1_cd(&cd, 778);
|
||||
arm_smmu_test_make_s1_cd(&cd_2, 1997);
|
||||
arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
|
||||
NUM_EXPECTED_SYNCS(1));
|
||||
arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
|
||||
NUM_EXPECTED_SYNCS(1));
|
||||
}
|
||||
|
||||
static void arm_smmu_test_make_sva_cd(struct arm_smmu_cd *cd, unsigned int asid)
|
||||
{
|
||||
struct arm_smmu_master master = {
|
||||
.smmu = &smmu,
|
||||
};
|
||||
|
||||
arm_smmu_make_sva_cd(cd, &master, &sva_mm, asid);
|
||||
}
|
||||
|
||||
static void arm_smmu_test_make_sva_release_cd(struct arm_smmu_cd *cd,
|
||||
unsigned int asid)
|
||||
{
|
||||
struct arm_smmu_master master = {
|
||||
.smmu = &smmu,
|
||||
};
|
||||
|
||||
arm_smmu_make_sva_cd(cd, &master, NULL, asid);
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_cd_test_sva_clear(struct kunit *test)
|
||||
{
|
||||
struct arm_smmu_cd cd = {};
|
||||
struct arm_smmu_cd cd_2;
|
||||
|
||||
arm_smmu_test_make_sva_cd(&cd_2, 1997);
|
||||
arm_smmu_v3_test_cd_expect_non_hitless_transition(
|
||||
test, &cd, &cd_2, NUM_EXPECTED_SYNCS(2));
|
||||
arm_smmu_v3_test_cd_expect_non_hitless_transition(
|
||||
test, &cd_2, &cd, NUM_EXPECTED_SYNCS(2));
|
||||
}
|
||||
|
||||
static void arm_smmu_v3_write_cd_test_sva_release(struct kunit *test)
|
||||
{
|
||||
struct arm_smmu_cd cd;
|
||||
struct arm_smmu_cd cd_2;
|
||||
|
||||
arm_smmu_test_make_sva_cd(&cd, 1997);
|
||||
arm_smmu_test_make_sva_release_cd(&cd_2, 1997);
|
||||
arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd, &cd_2,
|
||||
NUM_EXPECTED_SYNCS(2));
|
||||
arm_smmu_v3_test_cd_expect_hitless_transition(test, &cd_2, &cd,
|
||||
NUM_EXPECTED_SYNCS(2));
|
||||
}
|
||||
|
||||
static struct kunit_case arm_smmu_v3_test_cases[] = {
|
||||
KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_abort),
|
||||
KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_bypass),
|
||||
KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_abort),
|
||||
KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_cdtable),
|
||||
KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_bypass),
|
||||
KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_cdtable),
|
||||
KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_abort),
|
||||
KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_s2),
|
||||
KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_bypass),
|
||||
KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_s2),
|
||||
KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_clear),
|
||||
KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_change_asid),
|
||||
KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_clear),
|
||||
KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_release),
|
||||
{},
|
||||
};
|
||||
|
||||
static int arm_smmu_v3_test_suite_init(struct kunit_suite *test)
|
||||
{
|
||||
arm_smmu_make_bypass_ste(&smmu, &bypass_ste);
|
||||
arm_smmu_make_abort_ste(&abort_ste);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct kunit_suite arm_smmu_v3_test_module = {
|
||||
.name = "arm-smmu-v3-kunit-test",
|
||||
.suite_init = arm_smmu_v3_test_suite_init,
|
||||
.test_cases = arm_smmu_v3_test_cases,
|
||||
};
|
||||
kunit_test_suites(&arm_smmu_v3_test_module);
|
||||
|
||||
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
|
||||
MODULE_LICENSE("GPL v2");
|
@ -26,15 +26,11 @@
|
||||
#include <linux/pci.h>
|
||||
#include <linux/pci-ats.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <kunit/visibility.h>
|
||||
|
||||
#include "arm-smmu-v3.h"
|
||||
#include "../../dma-iommu.h"
|
||||
|
||||
static bool disable_bypass = true;
|
||||
module_param(disable_bypass, bool, 0444);
|
||||
MODULE_PARM_DESC(disable_bypass,
|
||||
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
|
||||
|
||||
static bool disable_msipolling;
|
||||
module_param(disable_msipolling, bool, 0444);
|
||||
MODULE_PARM_DESC(disable_msipolling,
|
||||
@ -47,8 +43,9 @@ enum arm_smmu_msi_index {
|
||||
ARM_SMMU_MAX_MSIS,
|
||||
};
|
||||
|
||||
static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu,
|
||||
ioasid_t sid);
|
||||
#define NUM_ENTRY_QWORDS 8
|
||||
static_assert(sizeof(struct arm_smmu_ste) == NUM_ENTRY_QWORDS * sizeof(u64));
|
||||
static_assert(sizeof(struct arm_smmu_cd) == NUM_ENTRY_QWORDS * sizeof(u64));
|
||||
|
||||
static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
|
||||
[EVTQ_MSI_INDEX] = {
|
||||
@ -76,12 +73,6 @@ struct arm_smmu_option_prop {
|
||||
DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa);
|
||||
DEFINE_MUTEX(arm_smmu_asid_lock);
|
||||
|
||||
/*
|
||||
* Special value used by SVA when a process dies, to quiesce a CD without
|
||||
* disabling it.
|
||||
*/
|
||||
struct arm_smmu_ctx_desc quiet_cd = { 0 };
|
||||
|
||||
static struct arm_smmu_option_prop arm_smmu_options[] = {
|
||||
{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
|
||||
{ ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"},
|
||||
@ -90,6 +81,7 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
|
||||
|
||||
static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
|
||||
struct arm_smmu_device *smmu);
|
||||
static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master);
|
||||
|
||||
static void parse_driver_options(struct arm_smmu_device *smmu)
|
||||
{
|
||||
@@ -977,44 +969,45 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
* would be nice if this was complete according to the spec, but minimally it
* has to capture the bits this driver uses.
*/
static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent,
struct arm_smmu_ste *used_bits)
VISIBLE_IF_KUNIT
void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
{
unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent->data[0]));
unsigned int cfg = FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(ent[0]));

used_bits->data[0] = cpu_to_le64(STRTAB_STE_0_V);
if (!(ent->data[0] & cpu_to_le64(STRTAB_STE_0_V)))
used_bits[0] = cpu_to_le64(STRTAB_STE_0_V);
if (!(ent[0] & cpu_to_le64(STRTAB_STE_0_V)))
return;

used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_CFG);
used_bits[0] |= cpu_to_le64(STRTAB_STE_0_CFG);

/* S1 translates */
if (cfg & BIT(0)) {
used_bits->data[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
STRTAB_STE_0_S1CTXPTR_MASK |
STRTAB_STE_0_S1CDMAX);
used_bits->data[1] |=
used_bits[0] |= cpu_to_le64(STRTAB_STE_0_S1FMT |
STRTAB_STE_0_S1CTXPTR_MASK |
STRTAB_STE_0_S1CDMAX);
used_bits[1] |=
cpu_to_le64(STRTAB_STE_1_S1DSS | STRTAB_STE_1_S1CIR |
STRTAB_STE_1_S1COR | STRTAB_STE_1_S1CSH |
STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
STRTAB_STE_1_EATS);
used_bits->data[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
}

/* S2 translates */
if (cfg & BIT(1)) {
used_bits->data[1] |=
used_bits[1] |=
cpu_to_le64(STRTAB_STE_1_EATS | STRTAB_STE_1_SHCFG);
used_bits->data[2] |=
used_bits[2] |=
cpu_to_le64(STRTAB_STE_2_S2VMID | STRTAB_STE_2_VTCR |
STRTAB_STE_2_S2AA64 | STRTAB_STE_2_S2ENDI |
STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2R);
used_bits->data[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
used_bits[3] |= cpu_to_le64(STRTAB_STE_3_S2TTB_MASK);
}

if (cfg == STRTAB_STE_0_CFG_BYPASS)
used_bits->data[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_used);

/*
* Figure out if we can do a hitless update of entry to become target. Returns a
@@ -1022,57 +1015,55 @@ static void arm_smmu_get_ste_used(const struct arm_smmu_ste *ent,
* unused_update is an intermediate value of entry that has unused bits set to
* their new values.
*/
static u8 arm_smmu_entry_qword_diff(const struct arm_smmu_ste *entry,
const struct arm_smmu_ste *target,
struct arm_smmu_ste *unused_update)
static u8 arm_smmu_entry_qword_diff(struct arm_smmu_entry_writer *writer,
const __le64 *entry, const __le64 *target,
__le64 *unused_update)
{
struct arm_smmu_ste target_used = {};
struct arm_smmu_ste cur_used = {};
__le64 target_used[NUM_ENTRY_QWORDS] = {};
__le64 cur_used[NUM_ENTRY_QWORDS] = {};
u8 used_qword_diff = 0;
unsigned int i;

arm_smmu_get_ste_used(entry, &cur_used);
arm_smmu_get_ste_used(target, &target_used);
writer->ops->get_used(entry, cur_used);
writer->ops->get_used(target, target_used);

for (i = 0; i != ARRAY_SIZE(target_used.data); i++) {
for (i = 0; i != NUM_ENTRY_QWORDS; i++) {
/*
* Check that masks are up to date, the make functions are not
* allowed to set a bit to 1 if the used function doesn't say it
* is used.
*/
WARN_ON_ONCE(target->data[i] & ~target_used.data[i]);
WARN_ON_ONCE(target[i] & ~target_used[i]);

/* Bits can change because they are not currently being used */
unused_update->data[i] = (entry->data[i] & cur_used.data[i]) |
(target->data[i] & ~cur_used.data[i]);
unused_update[i] = (entry[i] & cur_used[i]) |
(target[i] & ~cur_used[i]);
/*
* Each bit indicates that a used bit in a qword needs to be
* changed after unused_update is applied.
*/
if ((unused_update->data[i] & target_used.data[i]) !=
target->data[i])
if ((unused_update[i] & target_used[i]) != target[i])
used_qword_diff |= 1 << i;
}
return used_qword_diff;
}

static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid,
struct arm_smmu_ste *entry,
const struct arm_smmu_ste *target, unsigned int start,
static bool entry_set(struct arm_smmu_entry_writer *writer, __le64 *entry,
const __le64 *target, unsigned int start,
unsigned int len)
{
bool changed = false;
unsigned int i;

for (i = start; len != 0; len--, i++) {
if (entry->data[i] != target->data[i]) {
WRITE_ONCE(entry->data[i], target->data[i]);
if (entry[i] != target[i]) {
WRITE_ONCE(entry[i], target[i]);
changed = true;
}
}

if (changed)
arm_smmu_sync_ste_for_sid(smmu, sid);
writer->ops->sync(writer);
return changed;
}

@@ -1102,24 +1093,22 @@ static bool entry_set(struct arm_smmu_device *smmu, ioasid_t sid,
* V=0 process. This relies on the IGNORED behavior described in the
* specification.
*/
static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
struct arm_smmu_ste *entry,
const struct arm_smmu_ste *target)
VISIBLE_IF_KUNIT
void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *entry,
const __le64 *target)
{
unsigned int num_entry_qwords = ARRAY_SIZE(target->data);
struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_ste unused_update;
__le64 unused_update[NUM_ENTRY_QWORDS];
u8 used_qword_diff;

used_qword_diff =
arm_smmu_entry_qword_diff(entry, target, &unused_update);
arm_smmu_entry_qword_diff(writer, entry, target, unused_update);
if (hweight8(used_qword_diff) == 1) {
/*
* Only one qword needs its used bits to be changed. This is a
* hitless update, update all bits the current STE is ignoring
* to their new values, then update a single "critical qword" to
* change the STE and finally 0 out any bits that are now unused
* in the target configuration.
* hitless update, update all bits the current STE/CD is
* ignoring to their new values, then update a single "critical
* qword" to change the STE/CD and finally 0 out any bits that
* are now unused in the target configuration.
*/
unsigned int critical_qword_index = ffs(used_qword_diff) - 1;

@@ -1128,22 +1117,21 @@ static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
* writing it in the next step anyways. This can save a sync
* when the only change is in that qword.
*/
unused_update.data[critical_qword_index] =
entry->data[critical_qword_index];
entry_set(smmu, sid, entry, &unused_update, 0, num_entry_qwords);
entry_set(smmu, sid, entry, target, critical_qword_index, 1);
entry_set(smmu, sid, entry, target, 0, num_entry_qwords);
unused_update[critical_qword_index] =
entry[critical_qword_index];
entry_set(writer, entry, unused_update, 0, NUM_ENTRY_QWORDS);
entry_set(writer, entry, target, critical_qword_index, 1);
entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS);
} else if (used_qword_diff) {
/*
* At least two qwords need their inuse bits to be changed. This
* requires a breaking update, zero the V bit, write all qwords
* but 0, then set qword 0
*/
unused_update.data[0] = entry->data[0] &
cpu_to_le64(~STRTAB_STE_0_V);
entry_set(smmu, sid, entry, &unused_update, 0, 1);
entry_set(smmu, sid, entry, target, 1, num_entry_qwords - 1);
entry_set(smmu, sid, entry, target, 0, 1);
unused_update[0] = 0;
entry_set(writer, entry, unused_update, 0, 1);
entry_set(writer, entry, target, 1, NUM_ENTRY_QWORDS - 1);
entry_set(writer, entry, target, 0, 1);
} else {
/*
* No inuse bit changed. Sanity check that all unused bits are 0
@@ -1151,20 +1139,10 @@ static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
* compute_qword_diff().
*/
WARN_ON_ONCE(
entry_set(smmu, sid, entry, target, 0, num_entry_qwords));
}

/* It's likely that we'll want to use the new STE soon */
if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
struct arm_smmu_cmdq_ent
prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
.prefetch = {
.sid = sid,
} };

arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS));
}
}
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_write_entry);

static void arm_smmu_sync_cd(struct arm_smmu_master *master,
|
||||
int ssid, bool leaf)
|
||||
@@ -1210,117 +1188,166 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
|
||||
u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
|
||||
CTXDESC_L1_DESC_V;
|
||||
|
||||
/* See comment in arm_smmu_write_ctx_desc() */
|
||||
/* The HW has 64 bit atomicity with stores to the L2 CD table */
|
||||
WRITE_ONCE(*dst, cpu_to_le64(val));
|
||||
}
|
||||
|
||||
static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, u32 ssid)
|
||||
struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
|
||||
u32 ssid)
|
||||
{
|
||||
__le64 *l1ptr;
|
||||
unsigned int idx;
|
||||
struct arm_smmu_l1_ctx_desc *l1_desc;
|
||||
struct arm_smmu_device *smmu = master->smmu;
|
||||
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
|
||||
|
||||
if (!cd_table->cdtab)
|
||||
return NULL;
|
||||
|
||||
if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
|
||||
return cd_table->cdtab + ssid * CTXDESC_CD_DWORDS;
|
||||
return (struct arm_smmu_cd *)(cd_table->cdtab +
|
||||
ssid * CTXDESC_CD_DWORDS);
|
||||
|
||||
idx = ssid >> CTXDESC_SPLIT;
|
||||
l1_desc = &cd_table->l1_desc[idx];
|
||||
if (!l1_desc->l2ptr) {
|
||||
if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
|
||||
return NULL;
|
||||
|
||||
l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
|
||||
arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
|
||||
/* An invalid L1CD can be cached */
|
||||
arm_smmu_sync_cd(master, ssid, false);
|
||||
}
|
||||
idx = ssid & (CTXDESC_L2_ENTRIES - 1);
|
||||
return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
|
||||
l1_desc = &cd_table->l1_desc[ssid / CTXDESC_L2_ENTRIES];
|
||||
if (!l1_desc->l2ptr)
|
||||
return NULL;
|
||||
return &l1_desc->l2ptr[ssid % CTXDESC_L2_ENTRIES];
|
||||
}
|
||||
|
||||
int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
|
||||
struct arm_smmu_ctx_desc *cd)
|
||||
struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
|
||||
u32 ssid)
|
||||
{
|
||||
/*
|
||||
* This function handles the following cases:
|
||||
*
|
||||
* (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0).
|
||||
* (2) Install a secondary CD, for SID+SSID traffic.
|
||||
* (3) Update ASID of a CD. Atomically write the first 64 bits of the
|
||||
* CD, then invalidate the old entry and mappings.
|
||||
* (4) Quiesce the context without clearing the valid bit. Disable
|
||||
* translation, and ignore any translation fault.
|
||||
* (5) Remove a secondary CD.
|
||||
*/
|
||||
u64 val;
|
||||
bool cd_live;
|
||||
__le64 *cdptr;
|
||||
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
|
||||
struct arm_smmu_device *smmu = master->smmu;
|
||||
|
||||
if (WARN_ON(ssid >= (1 << cd_table->s1cdmax)))
|
||||
return -E2BIG;
|
||||
might_sleep();
|
||||
iommu_group_mutex_assert(master->dev);
|
||||
|
||||
cdptr = arm_smmu_get_cd_ptr(master, ssid);
|
||||
if (!cdptr)
|
||||
return -ENOMEM;
|
||||
|
||||
val = le64_to_cpu(cdptr[0]);
|
||||
cd_live = !!(val & CTXDESC_CD_0_V);
|
||||
|
||||
if (!cd) { /* (5) */
|
||||
val = 0;
|
||||
} else if (cd == &quiet_cd) { /* (4) */
|
||||
if (!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
|
||||
val &= ~(CTXDESC_CD_0_S | CTXDESC_CD_0_R);
|
||||
val |= CTXDESC_CD_0_TCR_EPD0;
|
||||
} else if (cd_live) { /* (3) */
|
||||
val &= ~CTXDESC_CD_0_ASID;
|
||||
val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid);
|
||||
/*
|
||||
* Until CD+TLB invalidation, both ASIDs may be used for tagging
|
||||
* this substream's traffic
|
||||
*/
|
||||
} else { /* (1) and (2) */
|
||||
cdptr[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
|
||||
cdptr[2] = 0;
|
||||
cdptr[3] = cpu_to_le64(cd->mair);
|
||||
|
||||
/*
|
||||
* STE may be live, and the SMMU might read dwords of this CD in any
|
||||
* order. Ensure that it observes valid values before reading
|
||||
* V=1.
|
||||
*/
|
||||
arm_smmu_sync_cd(master, ssid, true);
|
||||
|
||||
val = cd->tcr |
|
||||
#ifdef __BIG_ENDIAN
|
||||
CTXDESC_CD_0_ENDI |
|
||||
#endif
|
||||
CTXDESC_CD_0_R | CTXDESC_CD_0_A |
|
||||
(cd->mm ? 0 : CTXDESC_CD_0_ASET) |
|
||||
CTXDESC_CD_0_AA64 |
|
||||
FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
|
||||
CTXDESC_CD_0_V;
|
||||
|
||||
if (cd_table->stall_enabled)
|
||||
val |= CTXDESC_CD_0_S;
|
||||
if (!cd_table->cdtab) {
|
||||
if (arm_smmu_alloc_cd_tables(master))
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_64K_L2) {
|
||||
unsigned int idx = ssid / CTXDESC_L2_ENTRIES;
|
||||
struct arm_smmu_l1_ctx_desc *l1_desc;
|
||||
|
||||
l1_desc = &cd_table->l1_desc[idx];
|
||||
if (!l1_desc->l2ptr) {
|
||||
__le64 *l1ptr;
|
||||
|
||||
if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
|
||||
return NULL;
|
||||
|
||||
l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
|
||||
arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
|
||||
/* An invalid L1CD can be cached */
|
||||
arm_smmu_sync_cd(master, ssid, false);
|
||||
}
|
||||
}
|
||||
return arm_smmu_get_cd_ptr(master, ssid);
|
||||
}
|
||||
|
||||
struct arm_smmu_cd_writer {
|
||||
struct arm_smmu_entry_writer writer;
|
||||
unsigned int ssid;
|
||||
};
|
||||
|
||||
VISIBLE_IF_KUNIT
|
||||
void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits)
|
||||
{
|
||||
used_bits[0] = cpu_to_le64(CTXDESC_CD_0_V);
|
||||
if (!(ent[0] & cpu_to_le64(CTXDESC_CD_0_V)))
|
||||
return;
|
||||
memset(used_bits, 0xFF, sizeof(struct arm_smmu_cd));
|
||||
|
||||
/*
|
||||
* The SMMU accesses 64-bit values atomically. See IHI0070Ca 3.21.3
|
||||
* "Configuration structures and configuration invalidation completion"
|
||||
*
|
||||
* The size of single-copy atomic reads made by the SMMU is
|
||||
* IMPLEMENTATION DEFINED but must be at least 64 bits. Any single
|
||||
* field within an aligned 64-bit span of a structure can be altered
|
||||
* without first making the structure invalid.
|
||||
* If EPD0 is set by the make function it means
|
||||
* T0SZ/TG0/IR0/OR0/SH0/TTB0 are IGNORED
|
||||
*/
|
||||
WRITE_ONCE(cdptr[0], cpu_to_le64(val));
|
||||
arm_smmu_sync_cd(master, ssid, true);
|
||||
return 0;
|
||||
if (ent[0] & cpu_to_le64(CTXDESC_CD_0_TCR_EPD0)) {
|
||||
used_bits[0] &= ~cpu_to_le64(
|
||||
CTXDESC_CD_0_TCR_T0SZ | CTXDESC_CD_0_TCR_TG0 |
|
||||
CTXDESC_CD_0_TCR_IRGN0 | CTXDESC_CD_0_TCR_ORGN0 |
|
||||
CTXDESC_CD_0_TCR_SH0);
|
||||
used_bits[1] &= ~cpu_to_le64(CTXDESC_CD_1_TTB0_MASK);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_cd_used);
|
||||
|
||||
static void arm_smmu_cd_writer_sync_entry(struct arm_smmu_entry_writer *writer)
|
||||
{
|
||||
struct arm_smmu_cd_writer *cd_writer =
|
||||
container_of(writer, struct arm_smmu_cd_writer, writer);
|
||||
|
||||
arm_smmu_sync_cd(writer->master, cd_writer->ssid, true);
|
||||
}
|
||||
|
||||
static const struct arm_smmu_entry_writer_ops arm_smmu_cd_writer_ops = {
|
||||
.sync = arm_smmu_cd_writer_sync_entry,
|
||||
.get_used = arm_smmu_get_cd_used,
|
||||
};
|
||||
|
||||
void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
|
||||
struct arm_smmu_cd *cdptr,
|
||||
const struct arm_smmu_cd *target)
|
||||
{
|
||||
struct arm_smmu_cd_writer cd_writer = {
|
||||
.writer = {
|
||||
.ops = &arm_smmu_cd_writer_ops,
|
||||
.master = master,
|
||||
},
|
||||
.ssid = ssid,
|
||||
};
|
||||
|
||||
arm_smmu_write_entry(&cd_writer.writer, cdptr->data, target->data);
|
||||
}
|
||||
|
||||
void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
|
||||
struct arm_smmu_master *master,
|
||||
struct arm_smmu_domain *smmu_domain)
|
||||
{
|
||||
struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
|
||||
const struct io_pgtable_cfg *pgtbl_cfg =
|
||||
&io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
|
||||
typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr =
|
||||
&pgtbl_cfg->arm_lpae_s1_cfg.tcr;
|
||||
|
||||
memset(target, 0, sizeof(*target));
|
||||
|
||||
target->data[0] = cpu_to_le64(
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
|
||||
#ifdef __BIG_ENDIAN
|
||||
CTXDESC_CD_0_ENDI |
|
||||
#endif
|
||||
CTXDESC_CD_0_TCR_EPD1 |
|
||||
CTXDESC_CD_0_V |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
|
||||
CTXDESC_CD_0_AA64 |
|
||||
(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
|
||||
CTXDESC_CD_0_R |
|
||||
CTXDESC_CD_0_A |
|
||||
CTXDESC_CD_0_ASET |
|
||||
FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid)
|
||||
);
|
||||
target->data[1] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
|
||||
CTXDESC_CD_1_TTB0_MASK);
|
||||
target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair);
|
||||
}
|
||||
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s1_cd);
|
||||
|
||||
void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
|
||||
{
|
||||
struct arm_smmu_cd target = {};
|
||||
struct arm_smmu_cd *cdptr;
|
||||
|
||||
if (!master->cd_table.cdtab)
|
||||
return;
|
||||
cdptr = arm_smmu_get_cd_ptr(master, ssid);
|
||||
if (WARN_ON(!cdptr))
|
||||
return;
|
||||
arm_smmu_write_cd_entry(master, ssid, cdptr, &target);
|
||||
}
|
||||
|
||||
static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
|
||||
@@ -1331,7 +1358,6 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
|
||||
struct arm_smmu_device *smmu = master->smmu;
|
||||
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
|
||||
|
||||
cd_table->stall_enabled = master->stall_enabled;
|
||||
cd_table->s1cdmax = master->ssid_bits;
|
||||
max_contexts = 1 << cd_table->s1cdmax;
|
||||
|
||||
@@ -1429,33 +1455,75 @@ arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
|
||||
val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
|
||||
val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
|
||||
|
||||
/* See comment in arm_smmu_write_ctx_desc() */
|
||||
/* The HW has 64 bit atomicity with stores to the L2 STE table */
|
||||
WRITE_ONCE(*dst, cpu_to_le64(val));
|
||||
}
|
||||
|
||||
static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
|
||||
struct arm_smmu_ste_writer {
|
||||
struct arm_smmu_entry_writer writer;
|
||||
u32 sid;
|
||||
};
|
||||
|
||||
static void arm_smmu_ste_writer_sync_entry(struct arm_smmu_entry_writer *writer)
|
||||
{
|
||||
struct arm_smmu_ste_writer *ste_writer =
|
||||
container_of(writer, struct arm_smmu_ste_writer, writer);
|
||||
struct arm_smmu_cmdq_ent cmd = {
|
||||
.opcode = CMDQ_OP_CFGI_STE,
|
||||
.cfgi = {
|
||||
.sid = sid,
|
||||
.sid = ste_writer->sid,
|
||||
.leaf = true,
|
||||
},
|
||||
};
|
||||
|
||||
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
|
||||
arm_smmu_cmdq_issue_cmd_with_sync(writer->master->smmu, &cmd);
|
||||
}
|
||||
|
||||
static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
|
||||
static const struct arm_smmu_entry_writer_ops arm_smmu_ste_writer_ops = {
|
||||
.sync = arm_smmu_ste_writer_sync_entry,
|
||||
.get_used = arm_smmu_get_ste_used,
|
||||
};
|
||||
|
||||
static void arm_smmu_write_ste(struct arm_smmu_master *master, u32 sid,
|
||||
struct arm_smmu_ste *ste,
|
||||
const struct arm_smmu_ste *target)
|
||||
{
|
||||
struct arm_smmu_device *smmu = master->smmu;
|
||||
struct arm_smmu_ste_writer ste_writer = {
|
||||
.writer = {
|
||||
.ops = &arm_smmu_ste_writer_ops,
|
||||
.master = master,
|
||||
},
|
||||
.sid = sid,
|
||||
};
|
||||
|
||||
arm_smmu_write_entry(&ste_writer.writer, ste->data, target->data);
|
||||
|
||||
/* It's likely that we'll want to use the new STE soon */
|
||||
if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) {
|
||||
struct arm_smmu_cmdq_ent
|
||||
prefetch_cmd = { .opcode = CMDQ_OP_PREFETCH_CFG,
|
||||
.prefetch = {
|
||||
.sid = sid,
|
||||
} };
|
||||
|
||||
arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
|
||||
}
|
||||
}
|
||||
|
||||
VISIBLE_IF_KUNIT
|
||||
void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
|
||||
{
|
||||
memset(target, 0, sizeof(*target));
|
||||
target->data[0] = cpu_to_le64(
|
||||
STRTAB_STE_0_V |
|
||||
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
|
||||
}
|
||||
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_abort_ste);
|
||||
|
||||
static void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
|
||||
struct arm_smmu_ste *target)
|
||||
VISIBLE_IF_KUNIT
|
||||
void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
|
||||
struct arm_smmu_ste *target)
|
||||
{
|
||||
memset(target, 0, sizeof(*target));
|
||||
target->data[0] = cpu_to_le64(
|
||||
@@ -1466,9 +1534,11 @@ static void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
|
||||
target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
|
||||
STRTAB_STE_1_SHCFG_INCOMING));
|
||||
}
|
||||
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_bypass_ste);
|
||||
|
||||
static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
|
||||
struct arm_smmu_master *master)
|
||||
VISIBLE_IF_KUNIT
|
||||
void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
|
||||
struct arm_smmu_master *master)
|
||||
{
|
||||
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
|
||||
struct arm_smmu_device *smmu = master->smmu;
|
||||
@@ -1516,10 +1586,12 @@ static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
|
||||
cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0));
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_cdtable_ste);
|
||||
|
||||
static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
|
||||
struct arm_smmu_master *master,
|
||||
struct arm_smmu_domain *smmu_domain)
|
||||
VISIBLE_IF_KUNIT
|
||||
void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
|
||||
struct arm_smmu_master *master,
|
||||
struct arm_smmu_domain *smmu_domain)
|
||||
{
|
||||
struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
|
||||
const struct io_pgtable_cfg *pgtbl_cfg =
|
||||
@@ -1562,22 +1634,19 @@ static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
|
||||
target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr &
|
||||
STRTAB_STE_3_S2TTB_MASK);
|
||||
}
|
||||
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s2_domain_ste);
|
||||
|
||||
/*
|
||||
* This can safely directly manipulate the STE memory without a sync sequence
|
||||
* because the STE table has not been installed in the SMMU yet.
|
||||
*/
|
||||
static void arm_smmu_init_initial_stes(struct arm_smmu_device *smmu,
|
||||
struct arm_smmu_ste *strtab,
|
||||
static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,
|
||||
unsigned int nent)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < nent; ++i) {
|
||||
if (disable_bypass)
|
||||
arm_smmu_make_abort_ste(strtab);
|
||||
else
|
||||
arm_smmu_make_bypass_ste(smmu, strtab);
|
||||
arm_smmu_make_abort_ste(strtab);
|
||||
strtab++;
|
||||
}
|
||||
}
|
||||
@@ -1605,7 +1674,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
arm_smmu_init_initial_stes(smmu, desc->l2ptr, 1 << STRTAB_SPLIT);
|
||||
arm_smmu_init_initial_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
|
||||
arm_smmu_write_strtab_l1_desc(strtab, desc);
|
||||
return 0;
|
||||
}
|
||||
@@ -2230,13 +2299,11 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
|
||||
}
|
||||
|
||||
static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
|
||||
struct arm_smmu_domain *smmu_domain,
|
||||
struct io_pgtable_cfg *pgtbl_cfg)
|
||||
struct arm_smmu_domain *smmu_domain)
|
||||
{
|
||||
int ret;
|
||||
u32 asid;
|
||||
struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
|
||||
typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
|
||||
|
||||
refcount_set(&cd->refs, 1);
|
||||
|
||||
@@ -2244,31 +2311,13 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
|
||||
mutex_lock(&arm_smmu_asid_lock);
|
||||
ret = xa_alloc(&arm_smmu_asid_xa, &asid, cd,
|
||||
XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
cd->asid = (u16)asid;
|
||||
cd->ttbr = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
|
||||
cd->tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
|
||||
FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
|
||||
CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
|
||||
cd->mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
|
||||
|
||||
mutex_unlock(&arm_smmu_asid_lock);
|
||||
return 0;
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&arm_smmu_asid_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
|
||||
struct arm_smmu_domain *smmu_domain,
|
||||
struct io_pgtable_cfg *pgtbl_cfg)
|
||||
struct arm_smmu_domain *smmu_domain)
|
||||
{
|
||||
int vmid;
|
||||
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
|
||||
@@ -2292,8 +2341,7 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
|
||||
struct io_pgtable_cfg pgtbl_cfg;
|
||||
struct io_pgtable_ops *pgtbl_ops;
|
||||
int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
|
||||
struct arm_smmu_domain *smmu_domain,
|
||||
struct io_pgtable_cfg *pgtbl_cfg);
|
||||
struct arm_smmu_domain *smmu_domain);
|
||||
|
||||
/* Restrict the stage to what we can actually support */
|
||||
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
|
||||
@@ -2336,7 +2384,7 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
|
||||
smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
|
||||
smmu_domain->domain.geometry.force_aperture = true;
|
||||
|
||||
ret = finalise_stage_fn(smmu, smmu_domain, &pgtbl_cfg);
|
||||
ret = finalise_stage_fn(smmu, smmu_domain);
|
||||
if (ret < 0) {
|
||||
free_io_pgtable_ops(pgtbl_ops);
|
||||
return ret;
|
||||
@@ -2419,7 +2467,10 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master,
|
||||
pdev = to_pci_dev(master->dev);
|
||||
|
||||
atomic_inc(&smmu_domain->nr_ats_masters);
|
||||
arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
|
||||
/*
|
||||
* ATC invalidation of PASID 0 causes the entire ATC to be flushed.
|
||||
*/
|
||||
arm_smmu_atc_inv_master(master);
|
||||
if (pci_enable_ats(pdev, stu))
|
||||
dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
|
||||
}
|
||||
@@ -2515,6 +2566,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
|
||||
struct arm_smmu_device *smmu;
|
||||
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
|
||||
struct arm_smmu_master *master;
|
||||
struct arm_smmu_cd *cdptr;
|
||||
|
||||
if (!fwspec)
|
||||
return -ENOENT;
|
||||
@@ -2543,6 +2595,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
|
||||
cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID);
|
||||
if (!cdptr)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
* Prevent arm_smmu_share_asid() from trying to change the ASID
|
||||
* of either the old or new domain while we are working on it.
|
||||
@@ -2560,49 +2618,26 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
|
||||
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
|
||||
|
||||
switch (smmu_domain->stage) {
|
||||
case ARM_SMMU_DOMAIN_S1:
|
||||
if (!master->cd_table.cdtab) {
|
||||
ret = arm_smmu_alloc_cd_tables(master);
|
||||
if (ret)
|
||||
goto out_list_del;
|
||||
} else {
|
||||
/*
|
||||
* arm_smmu_write_ctx_desc() relies on the entry being
|
||||
* invalid to work, clear any existing entry.
|
||||
*/
|
||||
ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
|
||||
NULL);
|
||||
if (ret)
|
||||
goto out_list_del;
|
||||
}
|
||||
|
||||
ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd);
|
||||
if (ret)
|
||||
goto out_list_del;
|
||||
case ARM_SMMU_DOMAIN_S1: {
|
||||
struct arm_smmu_cd target_cd;
|
||||
|
||||
arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
|
||||
arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
|
||||
&target_cd);
|
||||
arm_smmu_make_cdtable_ste(&target, master);
|
||||
arm_smmu_install_ste_for_dev(master, &target);
|
||||
break;
|
||||
}
|
||||
case ARM_SMMU_DOMAIN_S2:
|
||||
arm_smmu_make_s2_domain_ste(&target, master, smmu_domain);
|
||||
arm_smmu_install_ste_for_dev(master, &target);
|
||||
if (master->cd_table.cdtab)
|
||||
arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
|
||||
NULL);
|
||||
arm_smmu_clear_cd(master, IOMMU_NO_PASID);
|
||||
break;
|
||||
}
|
||||
|
||||
arm_smmu_enable_ats(master, smmu_domain);
|
||||
goto out_unlock;
|
||||
|
||||
out_list_del:
|
||||
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
|
||||
list_del_init(&master->domain_head);
|
||||
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&arm_smmu_asid_lock);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int arm_smmu_attach_dev_ste(struct device *dev,
|
||||
@@ -2636,8 +2671,7 @@ static int arm_smmu_attach_dev_ste(struct device *dev,
|
||||
* arm_smmu_domain->devices to avoid races updating the same context
|
||||
* descriptor from arm_smmu_share_asid().
|
||||
*/
|
||||
if (master->cd_table.cdtab)
|
||||
arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
|
||||
arm_smmu_clear_cd(master, IOMMU_NO_PASID);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -2915,10 +2949,10 @@ static void arm_smmu_release_device(struct device *dev)
|
||||
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
|
||||
|
||||
/* Put the STE back to what arm_smmu_init_strtab() sets */
|
||||
if (disable_bypass && !dev->iommu->require_direct)
|
||||
arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
|
||||
else
|
||||
if (dev->iommu->require_direct)
|
||||
arm_smmu_attach_dev_identity(&arm_smmu_identity_domain, dev);
|
||||
else
|
||||
arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
|
||||
|
||||
arm_smmu_disable_pasid(master);
|
||||
arm_smmu_remove_master(master);
|
||||
@@ -3053,14 +3087,9 @@ static int arm_smmu_def_domain_type(struct device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
|
||||
static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
|
||||
struct iommu_domain *domain)
|
||||
{
|
||||
struct iommu_domain *domain;
|
||||
|
||||
domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA);
|
||||
if (WARN_ON(IS_ERR(domain)) || !domain)
|
||||
return;
|
||||
|
||||
arm_smmu_sva_remove_dev_pasid(domain, dev, pasid);
|
||||
}
|
||||
|
||||
@@ -3273,7 +3302,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
|
||||
reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
|
||||
cfg->strtab_base_cfg = reg;
|
||||
|
||||
arm_smmu_init_initial_stes(smmu, strtab, cfg->num_l1_ents);
|
||||
arm_smmu_init_initial_stes(strtab, cfg->num_l1_ents);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -3402,7 +3431,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
|
||||
smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
|
||||
|
||||
/* Add callback to free MSIs on teardown */
|
||||
devm_add_action(dev, arm_smmu_free_msis, dev);
|
||||
devm_add_action_or_reset(dev, arm_smmu_free_msis, dev);
|
||||
}
|
||||
|
||||
static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
|
||||
@@ -3503,7 +3532,7 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
|
||||
static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
|
||||
{
|
||||
int ret;
|
||||
u32 reg, enables;
|
||||
@@ -3513,7 +3542,6 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
|
||||
reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
|
||||
if (reg & CR0_SMMUEN) {
|
||||
dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
|
||||
WARN_ON(is_kdump_kernel() && !disable_bypass);
|
||||
arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
|
||||
}
|
||||
|
||||
@@ -3620,14 +3648,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
|
||||
if (is_kdump_kernel())
|
||||
enables &= ~(CR0_EVTQEN | CR0_PRIQEN);
|
||||
|
||||
/* Enable the SMMU interface, or ensure bypass */
|
||||
if (!bypass || disable_bypass) {
|
||||
enables |= CR0_SMMUEN;
|
||||
} else {
|
||||
ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
/* Enable the SMMU interface */
|
||||
enables |= CR0_SMMUEN;
|
||||
ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
|
||||
ARM_SMMU_CR0ACK);
|
||||
if (ret) {
|
||||
@@ -4019,7 +4041,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
|
||||
resource_size_t ioaddr;
|
||||
struct arm_smmu_device *smmu;
|
||||
struct device *dev = &pdev->dev;
|
||||
bool bypass;
|
||||
|
||||
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
|
||||
if (!smmu)
|
||||
@@ -4030,12 +4051,9 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
|
||||
ret = arm_smmu_device_dt_probe(pdev, smmu);
|
||||
} else {
|
||||
ret = arm_smmu_device_acpi_probe(pdev, smmu);
|
||||
if (ret == -ENODEV)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Set bypass mode according to firmware probing result */
|
||||
bypass = !!ret;
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Base address */
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
@@ -4099,7 +4117,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
|
||||
arm_smmu_rmr_install_bypass_ste(smmu);
|
||||
|
||||
/* Reset the device */
|
||||
ret = arm_smmu_device_reset(smmu, bypass);
|
||||
ret = arm_smmu_device_reset(smmu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@@ -275,14 +275,18 @@ struct arm_smmu_ste {
|
||||
* 2lvl: at most 1024 L1 entries,
|
||||
* 1024 lazy entries per table.
|
||||
*/
|
||||
#define CTXDESC_SPLIT 10
|
||||
#define CTXDESC_L2_ENTRIES (1 << CTXDESC_SPLIT)
|
||||
#define CTXDESC_L2_ENTRIES 1024
|
||||
|
||||
#define CTXDESC_L1_DESC_DWORDS 1
|
||||
#define CTXDESC_L1_DESC_V (1UL << 0)
|
||||
#define CTXDESC_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 12)
|
||||
|
||||
#define CTXDESC_CD_DWORDS 8
|
||||
|
||||
struct arm_smmu_cd {
|
||||
__le64 data[CTXDESC_CD_DWORDS];
|
||||
};
|
||||
|
||||
#define CTXDESC_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
|
||||
#define CTXDESC_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
|
||||
#define CTXDESC_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
|
||||
@@ -583,16 +587,13 @@ struct arm_smmu_strtab_l1_desc {
|
||||
|
||||
struct arm_smmu_ctx_desc {
|
||||
u16 asid;
|
||||
u64 ttbr;
|
||||
u64 tcr;
|
||||
u64 mair;
|
||||
|
||||
refcount_t refs;
|
||||
struct mm_struct *mm;
|
||||
};
|
||||
|
||||
struct arm_smmu_l1_ctx_desc {
|
||||
__le64 *l2ptr;
|
||||
struct arm_smmu_cd *l2ptr;
|
||||
dma_addr_t l2ptr_dma;
|
||||
};
|
||||
|
||||
@@ -604,8 +605,6 @@ struct arm_smmu_ctx_desc_cfg {
|
||||
u8 s1fmt;
|
||||
/* log2 of the maximum number of CDs supported by this table */
|
||||
u8 s1cdmax;
|
||||
/* Whether CD entries in this table have the stall bit set. */
|
||||
u8 stall_enabled:1;
|
||||
};
|
||||
|
||||
struct arm_smmu_s2_cfg {
|
||||
@@ -737,6 +736,36 @@ struct arm_smmu_domain {
|
||||
struct list_head mmu_notifiers;
|
||||
};
|
||||
|
||||
/* The following are exposed for testing purposes. */
|
||||
struct arm_smmu_entry_writer_ops;
|
||||
struct arm_smmu_entry_writer {
|
||||
const struct arm_smmu_entry_writer_ops *ops;
|
||||
struct arm_smmu_master *master;
|
||||
};
|
||||
|
||||
struct arm_smmu_entry_writer_ops {
|
||||
void (*get_used)(const __le64 *entry, __le64 *used);
|
||||
void (*sync)(struct arm_smmu_entry_writer *writer);
|
||||
};
|
||||
|
||||
#if IS_ENABLED(CONFIG_KUNIT)
|
||||
void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits);
|
||||
void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *cur,
|
||||
const __le64 *target);
|
||||
void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits);
|
||||
void arm_smmu_make_abort_ste(struct arm_smmu_ste *target);
|
||||
void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
|
||||
struct arm_smmu_ste *target);
|
||||
void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
|
||||
struct arm_smmu_master *master);
|
||||
void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
|
||||
struct arm_smmu_master *master,
|
||||
struct arm_smmu_domain *smmu_domain);
|
||||
void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
|
||||
struct arm_smmu_master *master, struct mm_struct *mm,
|
||||
u16 asid);
|
||||
#endif
|
||||
|
||||
static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
|
||||
{
|
||||
return container_of(dom, struct arm_smmu_domain, domain);
|
||||
@@ -744,10 +773,19 @@ static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
|
||||
|
||||
extern struct xarray arm_smmu_asid_xa;
|
||||
extern struct mutex arm_smmu_asid_lock;
|
||||
extern struct arm_smmu_ctx_desc quiet_cd;
|
||||
|
||||
int arm_smmu_write_ctx_desc(struct arm_smmu_master *smmu_master, int ssid,
|
||||
struct arm_smmu_ctx_desc *cd);
|
||||
void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid);
|
||||
struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
|
||||
u32 ssid);
|
||||
struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
|
||||
u32 ssid);
|
||||
void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
|
||||
struct arm_smmu_master *master,
|
||||
struct arm_smmu_domain *smmu_domain);
|
||||
void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
|
||||
struct arm_smmu_cd *cdptr,
|
||||
const struct arm_smmu_cd *target);
|
||||
|
||||
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
|
||||
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
|
||||
size_t granule, bool leaf,
|
||||
|
@@ -1,15 +1,66 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/cleanup.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/interconnect.h>
|
||||
#include <linux/firmware/qcom/qcom_scm.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/ratelimit.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include "arm-smmu.h"
|
||||
#include "arm-smmu-qcom.h"
|
||||
|
||||
#define TBU_DBG_TIMEOUT_US 100
|
||||
#define DEBUG_AXUSER_REG 0x30
|
||||
#define DEBUG_AXUSER_CDMID GENMASK_ULL(43, 36)
|
||||
#define DEBUG_AXUSER_CDMID_VAL 0xff
|
||||
#define DEBUG_PAR_REG 0x28
|
||||
#define DEBUG_PAR_FAULT_VAL BIT(0)
|
||||
#define DEBUG_PAR_PA GENMASK_ULL(47, 12)
|
||||
#define DEBUG_SID_HALT_REG 0x0
|
||||
#define DEBUG_SID_HALT_VAL BIT(16)
|
||||
#define DEBUG_SID_HALT_SID GENMASK(9, 0)
|
||||
#define DEBUG_SR_HALT_ACK_REG 0x20
|
||||
#define DEBUG_SR_HALT_ACK_VAL BIT(1)
|
||||
#define DEBUG_SR_ECATS_RUNNING_VAL BIT(0)
|
||||
#define DEBUG_TXN_AXCACHE GENMASK(5, 2)
|
||||
#define DEBUG_TXN_AXPROT GENMASK(8, 6)
|
||||
#define DEBUG_TXN_AXPROT_PRIV 0x1
|
||||
#define DEBUG_TXN_AXPROT_NSEC 0x2
|
||||
#define DEBUG_TXN_TRIGG_REG 0x18
|
||||
#define DEBUG_TXN_TRIGGER BIT(0)
|
||||
#define DEBUG_VA_ADDR_REG 0x8
|
||||
|
||||
static LIST_HEAD(tbu_list);
|
||||
static DEFINE_MUTEX(tbu_list_lock);
|
||||
static DEFINE_SPINLOCK(atos_lock);
|
||||
|
||||
struct qcom_tbu {
|
||||
struct device *dev;
|
||||
struct device_node *smmu_np;
|
||||
u32 sid_range[2];
|
||||
struct list_head list;
|
||||
struct clk *clk;
|
||||
struct icc_path *path;
|
||||
void __iomem *base;
|
||||
spinlock_t halt_lock; /* multiple halt or resume can't execute concurrently */
|
||||
int halt_count;
|
||||
};
|
||||
|
||||
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
|
||||
{
|
||||
return container_of(smmu, struct qcom_smmu, smmu);
|
||||
}
|
||||
|
||||
void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
|
||||
{
|
||||
int ret;
|
||||
@@ -49,3 +100,448 @@ void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
|
||||
tbu_pwr_status, sync_inv_ack, sync_inv_progress);
|
||||
}
|
||||
}
|
||||
|
||||
static struct qcom_tbu *qcom_find_tbu(struct qcom_smmu *qsmmu, u32 sid)
|
||||
{
|
||||
struct qcom_tbu *tbu;
|
||||
u32 start, end;
|
||||
|
||||
guard(mutex)(&tbu_list_lock);
|
||||
|
||||
if (list_empty(&tbu_list))
|
||||
return NULL;
|
||||
|
||||
list_for_each_entry(tbu, &tbu_list, list) {
|
||||
start = tbu->sid_range[0];
|
||||
end = start + tbu->sid_range[1];
|
||||
|
||||
if (qsmmu->smmu.dev->of_node == tbu->smmu_np &&
|
||||
start <= sid && sid < end)
|
||||
return tbu;
|
||||
}
|
||||
dev_err(qsmmu->smmu.dev, "Unable to find TBU for sid 0x%x\n", sid);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int qcom_tbu_halt(struct qcom_tbu *tbu, struct arm_smmu_domain *smmu_domain)
|
||||
{
|
||||
struct arm_smmu_device *smmu = smmu_domain->smmu;
|
||||
int ret = 0, idx = smmu_domain->cfg.cbndx;
|
||||
u32 val, fsr, status;
|
||||
|
||||
guard(spinlock_irqsave)(&tbu->halt_lock);
|
||||
if (tbu->halt_count) {
|
||||
tbu->halt_count++;
|
||||
return ret;
|
||||
}
|
||||
|
||||
val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
|
||||
val |= DEBUG_SID_HALT_VAL;
|
||||
writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
|
||||
|
||||
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
|
||||
if ((fsr & ARM_SMMU_FSR_FAULT) && (fsr & ARM_SMMU_FSR_SS)) {
|
||||
u32 sctlr_orig, sctlr;
|
||||
|
||||
/*
|
||||
* We are in a fault. Our request to halt the bus will not
|
||||
* complete until transactions in front of us (such as the fault
|
||||
* itself) have completed. Disable iommu faults and terminate
|
||||
* any existing transactions.
|
||||
*/
|
||||
sctlr_orig = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_SCTLR);
|
||||
sctlr = sctlr_orig & ~(ARM_SMMU_SCTLR_CFCFG | ARM_SMMU_SCTLR_CFIE);
|
||||
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr);
|
||||
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
|
||||
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, ARM_SMMU_RESUME_TERMINATE);
|
||||
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr_orig);
|
||||
}
|
||||
|
||||
if (readl_poll_timeout_atomic(tbu->base + DEBUG_SR_HALT_ACK_REG, status,
|
||||
(status & DEBUG_SR_HALT_ACK_VAL),
|
||||
0, TBU_DBG_TIMEOUT_US)) {
|
||||
dev_err(tbu->dev, "Timeout while trying to halt TBU!\n");
|
||||
ret = -ETIMEDOUT;
|
||||
|
||||
val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
|
||||
val &= ~DEBUG_SID_HALT_VAL;
|
||||
writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
tbu->halt_count = 1;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void qcom_tbu_resume(struct qcom_tbu *tbu)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
guard(spinlock_irqsave)(&tbu->halt_lock);
|
||||
if (!tbu->halt_count) {
|
||||
WARN(1, "%s: halt_count is 0", dev_name(tbu->dev));
|
||||
return;
|
||||
}
|
||||
|
||||
if (tbu->halt_count > 1) {
|
||||
tbu->halt_count--;
|
||||
return;
|
||||
}
|
||||
|
||||
val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
|
||||
val &= ~DEBUG_SID_HALT_VAL;
|
||||
writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
|
||||
|
||||
tbu->halt_count = 0;
|
||||
}
|
||||
|
||||
static phys_addr_t qcom_tbu_trigger_atos(struct arm_smmu_domain *smmu_domain,
|
||||
struct qcom_tbu *tbu, dma_addr_t iova, u32 sid)
|
||||
{
|
||||
bool atos_timedout = false;
|
||||
phys_addr_t phys = 0;
|
||||
ktime_t timeout;
|
||||
u64 val;
|
||||
|
||||
/* Set address and stream-id */
|
||||
val = readq_relaxed(tbu->base + DEBUG_SID_HALT_REG);
|
||||
val &= ~DEBUG_SID_HALT_SID;
|
||||
val |= FIELD_PREP(DEBUG_SID_HALT_SID, sid);
|
||||
writeq_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
|
||||
writeq_relaxed(iova, tbu->base + DEBUG_VA_ADDR_REG);
|
||||
val = FIELD_PREP(DEBUG_AXUSER_CDMID, DEBUG_AXUSER_CDMID_VAL);
|
||||
writeq_relaxed(val, tbu->base + DEBUG_AXUSER_REG);
|
||||
|
||||
/* Write-back read and write-allocate */
|
||||
val = FIELD_PREP(DEBUG_TXN_AXCACHE, 0xf);
|
||||
|
||||
/* Non-secure access */
|
||||
val |= FIELD_PREP(DEBUG_TXN_AXPROT, DEBUG_TXN_AXPROT_NSEC);
|
||||
|
||||
/* Privileged access */
|
||||
val |= FIELD_PREP(DEBUG_TXN_AXPROT, DEBUG_TXN_AXPROT_PRIV);
|
||||
|
||||
val |= DEBUG_TXN_TRIGGER;
|
||||
writeq_relaxed(val, tbu->base + DEBUG_TXN_TRIGG_REG);
|
||||
|
||||
timeout = ktime_add_us(ktime_get(), TBU_DBG_TIMEOUT_US);
|
||||
for (;;) {
|
||||
val = readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
|
||||
if (!(val & DEBUG_SR_ECATS_RUNNING_VAL))
|
||||
break;
|
||||
val = readl_relaxed(tbu->base + DEBUG_PAR_REG);
|
||||
if (val & DEBUG_PAR_FAULT_VAL)
|
||||
break;
|
||||
if (ktime_compare(ktime_get(), timeout) > 0) {
|
||||
atos_timedout = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
|
||||
if (val & DEBUG_PAR_FAULT_VAL)
|
||||
dev_err(tbu->dev, "ATOS generated a fault interrupt! PAR = %llx, SID=0x%x\n",
|
||||
val, sid);
|
||||
else if (atos_timedout)
|
||||
dev_err_ratelimited(tbu->dev, "ATOS translation timed out!\n");
|
||||
else
|
||||
phys = FIELD_GET(DEBUG_PAR_PA, val);
|
||||
|
||||
/* Reset hardware */
|
||||
writeq_relaxed(0, tbu->base + DEBUG_TXN_TRIGG_REG);
|
||||
writeq_relaxed(0, tbu->base + DEBUG_VA_ADDR_REG);
|
||||
val = readl_relaxed(tbu->base + DEBUG_SID_HALT_REG);
|
||||
val &= ~DEBUG_SID_HALT_SID;
|
||||
writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
|
||||
|
||||
return phys;
|
||||
}
|
||||
|
||||
static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain,
|
||||
dma_addr_t iova, u32 sid)
|
||||
{
|
||||
struct arm_smmu_device *smmu = smmu_domain->smmu;
|
||||
struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
|
||||
int idx = smmu_domain->cfg.cbndx;
|
||||
struct qcom_tbu *tbu;
|
||||
u32 sctlr_orig, sctlr;
|
||||
phys_addr_t phys = 0;
|
||||
int attempt = 0;
|
||||
int ret;
|
||||
u64 fsr;
|
||||
|
||||
tbu = qcom_find_tbu(qsmmu, sid);
|
||||
if (!tbu)
|
||||
return 0;
|
||||
|
||||
ret = icc_set_bw(tbu->path, 0, UINT_MAX);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = clk_prepare_enable(tbu->clk);
|
||||
if (ret)
|
||||
goto disable_icc;
|
||||
|
||||
ret = qcom_tbu_halt(tbu, smmu_domain);
|
||||
if (ret)
|
||||
goto disable_clk;
|
||||
|
||||
/*
|
||||
* ATOS/ECATS can trigger the fault interrupt, so disable it temporarily
|
||||
* and check for an interrupt manually.
|
||||
*/
|
||||
sctlr_orig = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_SCTLR);
|
||||
sctlr = sctlr_orig & ~(ARM_SMMU_SCTLR_CFCFG | ARM_SMMU_SCTLR_CFIE);
|
||||
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr);
|
||||
|
||||
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
|
||||
if (fsr & ARM_SMMU_FSR_FAULT) {
|
||||
/* Clear pending interrupts */
|
||||
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
|
||||
|
||||
/*
|
||||
* TBU halt takes care of resuming any stalled transaction.
* Kept it here for completeness' sake.
|
||||
*/
|
||||
if (fsr & ARM_SMMU_FSR_SS)
|
||||
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
|
||||
ARM_SMMU_RESUME_TERMINATE);
|
||||
}
|
||||
|
||||
/* Only one concurrent atos operation */
|
||||
scoped_guard(spinlock_irqsave, &atos_lock) {
|
||||
/*
|
||||
* If the translation fails, attempt the lookup a few more times.
|
||||
*/
|
||||
do {
|
||||
phys = qcom_tbu_trigger_atos(smmu_domain, tbu, iova, sid);
|
||||
|
||||
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
|
||||
if (fsr & ARM_SMMU_FSR_FAULT) {
|
||||
/* Clear pending interrupts */
|
||||
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
|
||||
|
||||
if (fsr & ARM_SMMU_FSR_SS)
|
||||
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
|
||||
ARM_SMMU_RESUME_TERMINATE);
|
||||
}
|
||||
} while (!phys && attempt++ < 2);
|
||||
|
||||
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr_orig);
|
||||
}
|
||||
qcom_tbu_resume(tbu);
|
||||
|
||||
/* Read to complete prior write transactions */
|
||||
readl_relaxed(tbu->base + DEBUG_SR_HALT_ACK_REG);
|
||||
|
||||
disable_clk:
|
||||
clk_disable_unprepare(tbu->clk);
|
||||
disable_icc:
|
||||
icc_set_bw(tbu->path, 0, 0);
|
||||
|
||||
return phys;
|
||||
}
|
||||
|
||||
static phys_addr_t qcom_smmu_iova_to_phys_hard(struct arm_smmu_domain *smmu_domain, dma_addr_t iova)
|
||||
{
|
||||
struct arm_smmu_device *smmu = smmu_domain->smmu;
|
||||
int idx = smmu_domain->cfg.cbndx;
|
||||
u32 frsynra;
|
||||
u16 sid;
|
||||
|
||||
frsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
|
||||
sid = FIELD_GET(ARM_SMMU_CBFRSYNRA_SID, frsynra);
|
||||
|
||||
return qcom_iova_to_phys(smmu_domain, iova, sid);
|
||||
}
|
||||
|
||||
static phys_addr_t qcom_smmu_verify_fault(struct arm_smmu_domain *smmu_domain, dma_addr_t iova, u32 fsr)
|
||||
{
|
||||
struct io_pgtable *iop = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops);
|
||||
struct arm_smmu_device *smmu = smmu_domain->smmu;
|
||||
phys_addr_t phys_post_tlbiall;
|
||||
phys_addr_t phys;
|
||||
|
||||
phys = qcom_smmu_iova_to_phys_hard(smmu_domain, iova);
|
||||
io_pgtable_tlb_flush_all(iop);
|
||||
phys_post_tlbiall = qcom_smmu_iova_to_phys_hard(smmu_domain, iova);
|
||||
|
||||
if (phys != phys_post_tlbiall) {
|
||||
dev_err(smmu->dev,
|
||||
"ATOS results differed across TLBIALL... (before: %pa after: %pa)\n",
|
||||
&phys, &phys_post_tlbiall);
|
||||
}
|
||||
|
||||
return (phys == 0 ? phys_post_tlbiall : phys);
|
||||
}
|
||||
|
||||
irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
|
||||
{
|
||||
struct arm_smmu_domain *smmu_domain = dev;
|
||||
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
|
||||
struct arm_smmu_device *smmu = smmu_domain->smmu;
|
||||
u32 fsr, fsynr, cbfrsynra, resume = 0;
|
||||
int idx = smmu_domain->cfg.cbndx;
|
||||
phys_addr_t phys_soft;
|
||||
unsigned long iova;
|
||||
int ret, tmp;
|
||||
|
||||
static DEFINE_RATELIMIT_STATE(_rs,
|
||||
DEFAULT_RATELIMIT_INTERVAL,
|
||||
DEFAULT_RATELIMIT_BURST);
|
||||
|
||||
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
|
||||
if (!(fsr & ARM_SMMU_FSR_FAULT))
|
||||
return IRQ_NONE;
|
||||
|
||||
fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
|
||||
iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
|
||||
cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
|
||||
|
||||
if (list_empty(&tbu_list)) {
|
||||
ret = report_iommu_fault(&smmu_domain->domain, NULL, iova,
|
||||
fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
|
||||
|
||||
if (ret == -ENOSYS)
|
||||
dev_err_ratelimited(smmu->dev,
|
||||
"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
|
||||
fsr, iova, fsynr, cbfrsynra, idx);
|
||||
|
||||
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
phys_soft = ops->iova_to_phys(ops, iova);
|
||||
|
||||
tmp = report_iommu_fault(&smmu_domain->domain, NULL, iova,
|
||||
fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
|
||||
if (!tmp || tmp == -EBUSY) {
|
||||
dev_dbg(smmu->dev,
|
||||
"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
|
||||
iova, fsr, fsynr, idx);
|
||||
dev_dbg(smmu->dev, "soft iova-to-phys=%pa\n", &phys_soft);
|
||||
ret = IRQ_HANDLED;
|
||||
resume = ARM_SMMU_RESUME_TERMINATE;
|
||||
} else {
|
||||
phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, iova, fsr);
|
||||
|
||||
if (__ratelimit(&_rs)) {
|
||||
dev_err(smmu->dev,
|
||||
"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
|
||||
fsr, iova, fsynr, cbfrsynra, idx);
|
||||
dev_err(smmu->dev,
|
||||
"FSR = %08x [%s%s%s%s%s%s%s%s%s], SID=0x%x\n",
|
||||
fsr,
|
||||
(fsr & 0x02) ? "TF " : "",
|
||||
(fsr & 0x04) ? "AFF " : "",
|
||||
(fsr & 0x08) ? "PF " : "",
|
||||
(fsr & 0x10) ? "EF " : "",
|
||||
(fsr & 0x20) ? "TLBMCF " : "",
|
||||
(fsr & 0x40) ? "TLBLKF " : "",
|
||||
(fsr & 0x80) ? "MHF " : "",
|
||||
(fsr & 0x40000000) ? "SS " : "",
|
||||
(fsr & 0x80000000) ? "MULTI " : "",
|
||||
cbfrsynra);
|
||||
|
||||
dev_err(smmu->dev,
|
||||
"soft iova-to-phys=%pa\n", &phys_soft);
|
||||
if (!phys_soft)
|
||||
dev_err(smmu->dev,
|
||||
"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
|
||||
dev_name(smmu->dev));
|
||||
if (phys_atos)
|
||||
dev_err(smmu->dev, "hard iova-to-phys (ATOS)=%pa\n",
|
||||
&phys_atos);
|
||||
else
|
||||
dev_err(smmu->dev, "hard iova-to-phys (ATOS) failed\n");
|
||||
}
|
||||
ret = IRQ_NONE;
|
||||
resume = ARM_SMMU_RESUME_TERMINATE;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the client returns -EBUSY, do not clear FSR and do not RESUME
|
||||
* if stalled. This is required to keep the IOMMU client stalled on
|
||||
* the outstanding fault. This gives the client a chance to take any
|
||||
* debug action and then terminate the stalled transaction.
|
||||
* So, the sequence in case of stall on fault should be:
|
||||
* 1) Do not clear FSR or write to RESUME here
|
||||
* 2) Client takes any debug action
|
||||
* 3) Client terminates the stalled transaction and resumes the IOMMU
|
||||
* 4) Client clears FSR. The FSR should only be cleared after 3) and
|
||||
* not before so that the fault remains outstanding. This ensures
|
||||
* SCTLR.HUPCF has the desired effect if subsequent transactions also
|
||||
* need to be terminated.
|
||||
*/
|
||||
if (tmp != -EBUSY) {
|
||||
/* Clear the faulting FSR */
|
||||
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
|
||||
|
||||
/* Retry or terminate any stalled transactions */
|
||||
if (fsr & ARM_SMMU_FSR_SS)
|
||||
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, resume);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int qcom_tbu_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct of_phandle_args args = { .args_count = 2 };
|
||||
struct device_node *np = pdev->dev.of_node;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct qcom_tbu *tbu;
|
||||
|
||||
tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
|
||||
if (!tbu)
|
||||
return -ENOMEM;
|
||||
|
||||
tbu->dev = dev;
|
||||
INIT_LIST_HEAD(&tbu->list);
|
||||
spin_lock_init(&tbu->halt_lock);
|
||||
|
||||
if (of_parse_phandle_with_args(np, "qcom,stream-id-range", "#iommu-cells", 0, &args)) {
|
||||
dev_err(dev, "Cannot parse the 'qcom,stream-id-range' DT property\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
tbu->smmu_np = args.np;
|
||||
tbu->sid_range[0] = args.args[0];
|
||||
tbu->sid_range[1] = args.args[1];
|
||||
of_node_put(args.np);
|
||||
|
||||
tbu->base = devm_of_iomap(dev, np, 0, NULL);
|
||||
if (IS_ERR(tbu->base))
|
||||
return PTR_ERR(tbu->base);
|
||||
|
||||
tbu->clk = devm_clk_get_optional(dev, NULL);
|
||||
if (IS_ERR(tbu->clk))
|
||||
return PTR_ERR(tbu->clk);
|
||||
|
||||
tbu->path = devm_of_icc_get(dev, NULL);
|
||||
if (IS_ERR(tbu->path))
|
||||
return PTR_ERR(tbu->path);
|
||||
|
||||
guard(mutex)(&tbu_list_lock);
|
||||
list_add_tail(&tbu->list, &tbu_list);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id qcom_tbu_of_match[] = {
|
||||
{ .compatible = "qcom,sc7280-tbu" },
|
||||
{ .compatible = "qcom,sdm845-tbu" },
|
||||
{ }
|
||||
};
|
||||
|
||||
static struct platform_driver qcom_tbu_driver = {
|
||||
.driver = {
|
||||
.name = "qcom_tbu",
|
||||
.of_match_table = qcom_tbu_of_match,
|
||||
},
|
||||
.probe = qcom_tbu_probe,
|
||||
};
|
||||
builtin_platform_driver(qcom_tbu_driver);
|
||||
|
@@ -413,6 +413,10 @@ static const struct arm_smmu_impl qcom_smmu_500_impl = {
|
||||
.reset = arm_mmu500_reset,
|
||||
.write_s2cr = qcom_smmu_write_s2cr,
|
||||
.tlb_sync = qcom_smmu_tlb_sync,
|
||||
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
|
||||
.context_fault = qcom_smmu_context_fault,
|
||||
.context_fault_needs_threaded_irq = true,
|
||||
#endif
|
||||
};
|
||||
|
||||
static const struct arm_smmu_impl sdm845_smmu_500_impl = {
|
||||
@ -422,6 +426,10 @@ static const struct arm_smmu_impl sdm845_smmu_500_impl = {
|
||||
.reset = qcom_sdm845_smmu500_reset,
|
||||
.write_s2cr = qcom_smmu_write_s2cr,
|
||||
.tlb_sync = qcom_smmu_tlb_sync,
|
||||
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
|
||||
.context_fault = qcom_smmu_context_fault,
|
||||
.context_fault_needs_threaded_irq = true,
|
||||
#endif
|
||||
};
|
||||
|
||||
static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
|
||||
|
@@ -30,6 +30,8 @@ struct qcom_smmu_match_data {
|
||||
const struct arm_smmu_impl *adreno_impl;
|
||||
};
|
||||
|
||||
irqreturn_t qcom_smmu_context_fault(int irq, void *dev);
|
||||
|
||||
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
|
||||
void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu);
|
||||
#else
|
||||
|
@@ -806,8 +806,16 @@ static int arm_smmu_init_domain_context(struct arm_smmu_domain *smmu_domain,
	else
		context_fault = arm_smmu_context_fault;

	ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
			       "arm-smmu-context-fault", smmu_domain);
	if (smmu->impl && smmu->impl->context_fault_needs_threaded_irq)
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
						context_fault,
						IRQF_ONESHOT | IRQF_SHARED,
						"arm-smmu-context-fault",
						smmu_domain);
	else
		ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
				       "arm-smmu-context-fault", smmu_domain);

	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
@@ -859,14 +867,10 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED) {
		if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
			return NULL;
	}
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
@@ -1596,7 +1600,7 @@ static struct iommu_ops arm_smmu_ops = {
	.identity_domain = &arm_smmu_identity_domain,
	.blocked_domain = &arm_smmu_blocked_domain,
	.capable = arm_smmu_capable,
	.domain_alloc = arm_smmu_domain_alloc,
	.domain_alloc_paging = arm_smmu_domain_alloc_paging,
	.probe_device = arm_smmu_probe_device,
	.release_device = arm_smmu_release_device,
	.probe_finalize = arm_smmu_probe_finalize,

@@ -136,6 +136,7 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CBAR_VMID GENMASK(7, 0)

#define ARM_SMMU_GR1_CBFRSYNRA(n) (0x400 + ((n) << 2))
#define ARM_SMMU_CBFRSYNRA_SID GENMASK(15, 0)

#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
#define ARM_SMMU_CBA2R_VMID16 GENMASK(31, 16)
@@ -238,6 +239,7 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CB_ATSR 0x8f0
#define ARM_SMMU_ATSR_ACTIVE BIT(0)

#define ARM_SMMU_RESUME_TERMINATE BIT(0)

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS 128
@@ -436,6 +438,7 @@ struct arm_smmu_impl {
	int (*def_domain_type)(struct device *dev);
	irqreturn_t (*global_fault)(int irq, void *dev);
	irqreturn_t (*context_fault)(int irq, void *dev);
	bool context_fault_needs_threaded_irq;
	int (*alloc_context_bank)(struct arm_smmu_domain *smmu_domain,
				  struct arm_smmu_device *smmu,
				  struct device *dev, int start);

@@ -32,6 +32,7 @@
#include <trace/events/swiotlb.h>

#include "dma-iommu.h"
#include "iommu-pages.h"

struct iommu_dma_msi_page {
	struct list_head list;
@@ -156,7 +157,7 @@ static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq
		if (fq->entries[idx].counter >= counter)
			break;

		put_pages_list(&fq->entries[idx].freelist);
		iommu_put_pages_list(&fq->entries[idx].freelist);
		free_iova_fast(&cookie->iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);
@@ -254,7 +255,7 @@ static void iommu_dma_free_fq_single(struct iova_fq *fq)
	int idx;

	fq_ring_for_each(idx, fq)
		put_pages_list(&fq->entries[idx].freelist);
		iommu_put_pages_list(&fq->entries[idx].freelist);
	vfree(fq);
}

@@ -267,7 +268,7 @@ static void iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
		struct iova_fq *fq = per_cpu_ptr(percpu_fq, cpu);

		fq_ring_for_each(idx, fq)
			put_pages_list(&fq->entries[idx].freelist);
			iommu_put_pages_list(&fq->entries[idx].freelist);
	}

	free_percpu(percpu_fq);
@@ -660,19 +661,16 @@ static void iommu_dma_init_options(struct iommu_dma_options *options,
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @limit: Last address of the IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * If the geometry and dma_range_map include address 0, we reserve that page
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
				 dma_addr_t limit, struct device *dev)
static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	const struct bus_dma_region *map = dev->dma_range_map;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int ret;
@@ -684,18 +682,18 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	base_pfn = 1;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
	if (map) {
		dma_addr_t base = dma_range_map_min(map);
		if (base > domain->geometry.aperture_end ||
		    limit < domain->geometry.aperture_start) {
		    dma_range_map_max(map) < domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				 domain->geometry.aperture_start >> order);
		base_pfn = max(base, domain->geometry.aperture_start) >> order;
	}

	/* start_pfn is always nonzero for an already-initialised domain */
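The hunk above replaces the caller-supplied base/limit pair with values derived from the device's dma_range_map and the domain aperture. A standalone sketch of the resulting base_pfn arithmetic, with made-up numbers standing in for dma_range_map_min() and the aperture start:

/* Standalone sketch, hypothetical values only. */
#include <stdio.h>

int main(void)
{
	unsigned long order = 12;			/* 4 KiB IOVA granule */
	unsigned long long map_min = 0x80000000ULL;	/* assumed dma_range_map_min() */
	unsigned long long aperture_start = 0x1000ULL;

	/* base_pfn = max(base, domain->geometry.aperture_start) >> order */
	unsigned long long base = map_min > aperture_start ? map_min : aperture_start;
	unsigned long long base_pfn = base >> order;

	printf("base_pfn = %#llx\n", base_pfn);		/* prints 0x80000 */
	return 0;
}
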
@@ -1744,25 +1742,20 @@ static const struct dma_map_ops iommu_dma_ops = {
	.max_mapping_size = iommu_dma_max_mapping_size,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
void iommu_setup_dma_ops(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;
	if (dev_is_pci(dev))
		dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (iommu_is_dma_domain(domain)) {
		if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
		if (iommu_dma_init_domain(domain, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	} else if (dev->dma_ops == &iommu_dma_ops) {
		/* Clean up if we've switched *from* a DMA domain */
		dev->dma_ops = NULL;
	}

	return;
@@ -1770,7 +1763,6 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
EXPORT_SYMBOL_GPL(iommu_setup_dma_ops);

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)

@@ -9,6 +9,8 @@

#ifdef CONFIG_IOMMU_DMA

void iommu_setup_dma_ops(struct device *dev);

int iommu_get_dma_cookie(struct iommu_domain *domain);
void iommu_put_dma_cookie(struct iommu_domain *domain);

@@ -17,13 +19,13 @@ int iommu_dma_init_fq(struct iommu_domain *domain);
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);

extern bool iommu_dma_forcedac;
static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev)
{
	dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
}

#else /* CONFIG_IOMMU_DMA */

static inline void iommu_setup_dma_ops(struct device *dev)
{
}

static inline int iommu_dma_init_fq(struct iommu_domain *domain)
{
	return -EINVAL;
@@ -42,9 +44,5 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
{
}

static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev)
{
}

#endif /* CONFIG_IOMMU_DMA */
#endif /* __DMA_IOMMU_H */

@@ -22,6 +22,8 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "iommu-pages.h"

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
static struct iommu_domain exynos_identity_domain;
@@ -900,11 +902,11 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
	if (!domain)
		return NULL;

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	domain->pgtable = iommu_alloc_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	domain->lv2entcnt = iommu_alloc_pages(GFP_KERNEL, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

@@ -930,9 +932,9 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
	iommu_free_pages(domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
	iommu_free_pages(domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
@@ -973,8 +975,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
				phys_to_virt(base));
	}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	iommu_free_pages(domain->pgtable, 2);
	iommu_free_pages(domain->lv2entcnt, 1);
	kfree(domain);
}

@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o
obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o nested.o cache.o
obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o

drivers/iommu/intel/cache.c (new file, 419 lines)
@@ -0,0 +1,419 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * cache.c - Intel VT-d cache invalidation
 *
 * Copyright (C) 2024 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

#include "iommu.h"
#include "pasid.h"
#include "trace.h"

/* Check if an existing cache tag can be reused for a new association. */
static bool cache_tage_match(struct cache_tag *tag, u16 domain_id,
			     struct intel_iommu *iommu, struct device *dev,
			     ioasid_t pasid, enum cache_tag_type type)
{
	if (tag->type != type)
		return false;

	if (tag->domain_id != domain_id || tag->pasid != pasid)
		return false;

	if (type == CACHE_TAG_IOTLB || type == CACHE_TAG_NESTING_IOTLB)
		return tag->iommu == iommu;

	if (type == CACHE_TAG_DEVTLB || type == CACHE_TAG_NESTING_DEVTLB)
		return tag->dev == dev;

	return false;
}

/* Assign a cache tag with specified type to domain. */
|
||||
static int cache_tag_assign(struct dmar_domain *domain, u16 did,
|
||||
struct device *dev, ioasid_t pasid,
|
||||
enum cache_tag_type type)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(dev);
|
||||
struct intel_iommu *iommu = info->iommu;
|
||||
struct cache_tag *tag, *temp;
|
||||
unsigned long flags;
|
||||
|
||||
tag = kzalloc(sizeof(*tag), GFP_KERNEL);
|
||||
if (!tag)
|
||||
return -ENOMEM;
|
||||
|
||||
tag->type = type;
|
||||
tag->iommu = iommu;
|
||||
tag->domain_id = did;
|
||||
tag->pasid = pasid;
|
||||
tag->users = 1;
|
||||
|
||||
if (type == CACHE_TAG_DEVTLB || type == CACHE_TAG_NESTING_DEVTLB)
|
||||
tag->dev = dev;
|
||||
else
|
||||
tag->dev = iommu->iommu.dev;
|
||||
|
||||
spin_lock_irqsave(&domain->cache_lock, flags);
|
||||
list_for_each_entry(temp, &domain->cache_tags, node) {
|
||||
if (cache_tage_match(temp, did, iommu, dev, pasid, type)) {
|
||||
temp->users++;
|
||||
spin_unlock_irqrestore(&domain->cache_lock, flags);
|
||||
kfree(tag);
|
||||
trace_cache_tag_assign(temp);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
list_add_tail(&tag->node, &domain->cache_tags);
|
||||
spin_unlock_irqrestore(&domain->cache_lock, flags);
|
||||
trace_cache_tag_assign(tag);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Unassign a cache tag with specified type from domain. */
|
||||
static void cache_tag_unassign(struct dmar_domain *domain, u16 did,
|
||||
struct device *dev, ioasid_t pasid,
|
||||
enum cache_tag_type type)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(dev);
|
||||
struct intel_iommu *iommu = info->iommu;
|
||||
struct cache_tag *tag;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&domain->cache_lock, flags);
|
||||
list_for_each_entry(tag, &domain->cache_tags, node) {
|
||||
if (cache_tage_match(tag, did, iommu, dev, pasid, type)) {
|
||||
trace_cache_tag_unassign(tag);
|
||||
if (--tag->users == 0) {
|
||||
list_del(&tag->node);
|
||||
kfree(tag);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&domain->cache_lock, flags);
|
||||
}
|
||||
|
||||
static int __cache_tag_assign_domain(struct dmar_domain *domain, u16 did,
|
||||
struct device *dev, ioasid_t pasid)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(dev);
|
||||
int ret;
|
||||
|
||||
ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
|
||||
if (ret || !info->ats_enabled)
|
||||
return ret;
|
||||
|
||||
ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_DEVTLB);
|
||||
if (ret)
|
||||
cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __cache_tag_unassign_domain(struct dmar_domain *domain, u16 did,
|
||||
struct device *dev, ioasid_t pasid)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(dev);
|
||||
|
||||
cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
|
||||
|
||||
if (info->ats_enabled)
|
||||
cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_DEVTLB);
|
||||
}
|
||||
|
||||
static int __cache_tag_assign_parent_domain(struct dmar_domain *domain, u16 did,
|
||||
struct device *dev, ioasid_t pasid)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(dev);
|
||||
int ret;
|
||||
|
||||
ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB);
|
||||
if (ret || !info->ats_enabled)
|
||||
return ret;
|
||||
|
||||
ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_DEVTLB);
|
||||
if (ret)
|
||||
cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __cache_tag_unassign_parent_domain(struct dmar_domain *domain, u16 did,
|
||||
struct device *dev, ioasid_t pasid)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(dev);
|
||||
|
||||
cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB);
|
||||
|
||||
if (info->ats_enabled)
|
||||
cache_tag_unassign(domain, did, dev, pasid, CACHE_TAG_NESTING_DEVTLB);
|
||||
}
|
||||
|
||||
static u16 domain_get_id_for_dev(struct dmar_domain *domain, struct device *dev)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(dev);
|
||||
struct intel_iommu *iommu = info->iommu;
|
||||
|
||||
/*
|
||||
* The driver assigns different domain IDs for all domains except
|
||||
* the SVA type.
|
||||
*/
|
||||
if (domain->domain.type == IOMMU_DOMAIN_SVA)
|
||||
return FLPT_DEFAULT_DID;
|
||||
|
||||
return domain_id_iommu(domain, iommu);
|
||||
}
|
||||
|
||||
/*
|
||||
* Assign cache tags to a domain when it's associated with a device's
|
||||
* PASID using a specific domain ID.
|
||||
*
|
||||
* On success (return value of 0), cache tags are created and added to the
|
||||
* domain's cache tag list. On failure (negative return value), an error
|
||||
* code is returned indicating the reason for the failure.
|
||||
*/
|
||||
int cache_tag_assign_domain(struct dmar_domain *domain,
|
||||
struct device *dev, ioasid_t pasid)
|
||||
{
|
||||
u16 did = domain_get_id_for_dev(domain, dev);
|
||||
int ret;
|
||||
|
||||
ret = __cache_tag_assign_domain(domain, did, dev, pasid);
|
||||
if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED)
|
||||
return ret;
|
||||
|
||||
ret = __cache_tag_assign_parent_domain(domain->s2_domain, did, dev, pasid);
|
||||
if (ret)
|
||||
__cache_tag_unassign_domain(domain, did, dev, pasid);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Remove the cache tags associated with a device's PASID when the domain is
|
||||
* detached from the device.
|
||||
*
|
||||
* The cache tags must be previously assigned to the domain by calling the
|
||||
* assign interface.
|
||||
*/
|
||||
void cache_tag_unassign_domain(struct dmar_domain *domain,
|
||||
struct device *dev, ioasid_t pasid)
|
||||
{
|
||||
u16 did = domain_get_id_for_dev(domain, dev);
|
||||
|
||||
__cache_tag_unassign_domain(domain, did, dev, pasid);
|
||||
if (domain->domain.type == IOMMU_DOMAIN_NESTED)
|
||||
__cache_tag_unassign_parent_domain(domain->s2_domain, did, dev, pasid);
|
||||
}
|
||||
|
||||
static unsigned long calculate_psi_aligned_address(unsigned long start,
						   unsigned long end,
						   unsigned long *_pages,
						   unsigned long *_mask)
{
	unsigned long pages = aligned_nrpages(start, end - start + 1);
	unsigned long aligned_pages = __roundup_pow_of_two(pages);
	unsigned long bitmask = aligned_pages - 1;
	unsigned long mask = ilog2(aligned_pages);
	unsigned long pfn = IOVA_PFN(start);

	/*
	 * PSI masks the low order bits of the base address. If the
	 * address isn't aligned to the mask, then compute a mask value
	 * needed to ensure the target range is flushed.
	 */
	if (unlikely(bitmask & pfn)) {
		unsigned long end_pfn = pfn + pages - 1, shared_bits;

		/*
		 * Since end_pfn <= pfn + bitmask, the only way bits
		 * higher than bitmask can differ in pfn and end_pfn is
		 * by carrying. This means after masking out bitmask,
		 * high bits starting with the first set bit in
		 * shared_bits are all equal in both pfn and end_pfn.
		 */
		shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
		mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
	}

	*_pages = aligned_pages;
	*_mask = mask;

	return ALIGN_DOWN(start, VTD_PAGE_SIZE << mask);
}
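A standalone sketch of the mask-widening arithmetic above, run on one made-up unaligned range (PFN 0x1006, 4 pages), to show how the carry argument ends up selecting a larger PSI mask. __builtin_ctzl stands in for the kernel's __ffs; illustration only:

/* Worked example for the PSI mask computation above. */
#include <stdio.h>

int main(void)
{
	unsigned long pfn = 0x1006, pages = 4;
	unsigned long aligned_pages = 4;		/* already a power of two */
	unsigned long bitmask = aligned_pages - 1;	/* 0x3 */
	unsigned long mask = 2;				/* ilog2(4) */

	if (bitmask & pfn) {				/* 0x1006 is not 4-page aligned */
		unsigned long end_pfn = pfn + pages - 1;	/* 0x1009 */
		unsigned long shared_bits = ~(pfn ^ end_pfn) & ~bitmask;

		mask = shared_bits ? __builtin_ctzl(shared_bits) : 64;
	}

	/*
	 * pfn ^ end_pfn = 0xf, so shared_bits has bits 0-3 cleared and the
	 * mask widens to 4: one PSI covering 16 pages from the aligned-down
	 * base (0x1000) flushes the whole 0x1006..0x1009 range.
	 */
	printf("mask = %lu\n", mask);
	return 0;
}
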
|
||||
|
||||
/*
|
||||
* Invalidates a range of IOVA from @start (inclusive) to @end (inclusive)
|
||||
* when the memory mappings in the target domain have been modified.
|
||||
*/
|
||||
void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
|
||||
unsigned long end, int ih)
|
||||
{
|
||||
unsigned long pages, mask, addr;
|
||||
struct cache_tag *tag;
|
||||
unsigned long flags;
|
||||
|
||||
addr = calculate_psi_aligned_address(start, end, &pages, &mask);
|
||||
|
||||
spin_lock_irqsave(&domain->cache_lock, flags);
|
||||
list_for_each_entry(tag, &domain->cache_tags, node) {
|
||||
struct intel_iommu *iommu = tag->iommu;
|
||||
struct device_domain_info *info;
|
||||
u16 sid;
|
||||
|
||||
switch (tag->type) {
|
||||
case CACHE_TAG_IOTLB:
|
||||
case CACHE_TAG_NESTING_IOTLB:
|
||||
if (domain->use_first_level) {
|
||||
qi_flush_piotlb(iommu, tag->domain_id,
|
||||
tag->pasid, addr, pages, ih);
|
||||
} else {
|
||||
/*
|
||||
* Fallback to domain selective flush if no
|
||||
* PSI support or the size is too big.
|
||||
*/
|
||||
if (!cap_pgsel_inv(iommu->cap) ||
|
||||
mask > cap_max_amask_val(iommu->cap))
|
||||
iommu->flush.flush_iotlb(iommu, tag->domain_id,
|
||||
0, 0, DMA_TLB_DSI_FLUSH);
|
||||
else
|
||||
iommu->flush.flush_iotlb(iommu, tag->domain_id,
|
||||
addr | ih, mask,
|
||||
DMA_TLB_PSI_FLUSH);
|
||||
}
|
||||
break;
|
||||
case CACHE_TAG_NESTING_DEVTLB:
|
||||
/*
|
||||
* Address translation cache in device side caches the
|
||||
* result of nested translation. There is no easy way
|
||||
* to identify the exact set of nested translations
|
||||
* affected by a change in S2. So just flush the entire
|
||||
* device cache.
|
||||
*/
|
||||
addr = 0;
|
||||
mask = MAX_AGAW_PFN_WIDTH;
|
||||
fallthrough;
|
||||
case CACHE_TAG_DEVTLB:
|
||||
info = dev_iommu_priv_get(tag->dev);
|
||||
sid = PCI_DEVID(info->bus, info->devfn);
|
||||
|
||||
if (tag->pasid == IOMMU_NO_PASID)
|
||||
qi_flush_dev_iotlb(iommu, sid, info->pfsid,
|
||||
info->ats_qdep, addr, mask);
|
||||
else
|
||||
qi_flush_dev_iotlb_pasid(iommu, sid, info->pfsid,
|
||||
tag->pasid, info->ats_qdep,
|
||||
addr, mask);
|
||||
|
||||
quirk_extra_dev_tlb_flush(info, addr, mask, tag->pasid, info->ats_qdep);
|
||||
break;
|
||||
}
|
||||
|
||||
trace_cache_tag_flush_range(tag, start, end, addr, pages, mask);
|
||||
}
|
||||
spin_unlock_irqrestore(&domain->cache_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidates all ranges of IOVA when the memory mappings in the target
|
||||
* domain have been modified.
|
||||
*/
|
||||
void cache_tag_flush_all(struct dmar_domain *domain)
|
||||
{
|
||||
struct cache_tag *tag;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&domain->cache_lock, flags);
|
||||
list_for_each_entry(tag, &domain->cache_tags, node) {
|
||||
struct intel_iommu *iommu = tag->iommu;
|
||||
struct device_domain_info *info;
|
||||
u16 sid;
|
||||
|
||||
switch (tag->type) {
|
||||
case CACHE_TAG_IOTLB:
|
||||
case CACHE_TAG_NESTING_IOTLB:
|
||||
if (domain->use_first_level)
|
||||
qi_flush_piotlb(iommu, tag->domain_id,
|
||||
tag->pasid, 0, -1, 0);
|
||||
else
|
||||
iommu->flush.flush_iotlb(iommu, tag->domain_id,
|
||||
0, 0, DMA_TLB_DSI_FLUSH);
|
||||
break;
|
||||
case CACHE_TAG_DEVTLB:
|
||||
case CACHE_TAG_NESTING_DEVTLB:
|
||||
info = dev_iommu_priv_get(tag->dev);
|
||||
sid = PCI_DEVID(info->bus, info->devfn);
|
||||
|
||||
qi_flush_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
|
||||
0, MAX_AGAW_PFN_WIDTH);
|
||||
quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH,
|
||||
IOMMU_NO_PASID, info->ats_qdep);
|
||||
break;
|
||||
}
|
||||
|
||||
trace_cache_tag_flush_all(tag);
|
||||
}
|
||||
spin_unlock_irqrestore(&domain->cache_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Invalidate a range of IOVA when new mappings are created in the target
|
||||
* domain.
|
||||
*
|
||||
* - VT-d spec, Section 6.1 Caching Mode: When the CM field is reported as
|
||||
* Set, any software updates to remapping structures other than first-
|
||||
* stage mapping requires explicit invalidation of the caches.
|
||||
* - VT-d spec, Section 6.8 Write Buffer Flushing: For hardware that requires
|
||||
* write buffer flushing, software must explicitly perform write-buffer
|
||||
* flushing, if cache invalidation is not required.
|
||||
*/
|
||||
void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
unsigned long pages, mask, addr;
|
||||
struct cache_tag *tag;
|
||||
unsigned long flags;
|
||||
|
||||
addr = calculate_psi_aligned_address(start, end, &pages, &mask);
|
||||
|
||||
spin_lock_irqsave(&domain->cache_lock, flags);
|
||||
list_for_each_entry(tag, &domain->cache_tags, node) {
|
||||
struct intel_iommu *iommu = tag->iommu;
|
||||
|
||||
if (!cap_caching_mode(iommu->cap) || domain->use_first_level) {
|
||||
iommu_flush_write_buffer(iommu);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (tag->type == CACHE_TAG_IOTLB ||
|
||||
tag->type == CACHE_TAG_NESTING_IOTLB) {
|
||||
/*
|
||||
* Fallback to domain selective flush if no
|
||||
* PSI support or the size is too big.
|
||||
*/
|
||||
if (!cap_pgsel_inv(iommu->cap) ||
|
||||
mask > cap_max_amask_val(iommu->cap))
|
||||
iommu->flush.flush_iotlb(iommu, tag->domain_id,
|
||||
0, 0, DMA_TLB_DSI_FLUSH);
|
||||
else
|
||||
iommu->flush.flush_iotlb(iommu, tag->domain_id,
|
||||
addr, mask,
|
||||
DMA_TLB_PSI_FLUSH);
|
||||
}
|
||||
|
||||
trace_cache_tag_flush_range_np(tag, start, end, addr, pages, mask);
|
||||
}
|
||||
spin_unlock_irqrestore(&domain->cache_lock, flags);
|
||||
}
|
@ -706,7 +706,6 @@ static ssize_t dmar_perf_latency_write(struct file *filp,
|
||||
dmar_latency_disable(iommu, DMAR_LATENCY_INV_IOTLB);
|
||||
dmar_latency_disable(iommu, DMAR_LATENCY_INV_DEVTLB);
|
||||
dmar_latency_disable(iommu, DMAR_LATENCY_INV_IEC);
|
||||
dmar_latency_disable(iommu, DMAR_LATENCY_PRQ);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
break;
|
||||
@ -728,12 +727,6 @@ static ssize_t dmar_perf_latency_write(struct file *filp,
|
||||
dmar_latency_enable(iommu, DMAR_LATENCY_INV_IEC);
|
||||
rcu_read_unlock();
|
||||
break;
|
||||
case 4:
|
||||
rcu_read_lock();
|
||||
for_each_active_iommu(iommu, drhd)
|
||||
dmar_latency_enable(iommu, DMAR_LATENCY_PRQ);
|
||||
rcu_read_unlock();
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -32,6 +32,7 @@
|
||||
|
||||
#include "iommu.h"
|
||||
#include "../irq_remapping.h"
|
||||
#include "../iommu-pages.h"
|
||||
#include "perf.h"
|
||||
#include "trace.h"
|
||||
#include "perfmon.h"
|
||||
@ -1067,7 +1068,6 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
|
||||
goto error_free_seq_id;
|
||||
}
|
||||
|
||||
err = -EINVAL;
|
||||
if (!cap_sagaw(iommu->cap) &&
|
||||
(!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
|
||||
pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
|
||||
@ -1187,7 +1187,7 @@ static void free_iommu(struct intel_iommu *iommu)
|
||||
}
|
||||
|
||||
if (iommu->qi) {
|
||||
free_page((unsigned long)iommu->qi->desc);
|
||||
iommu_free_page(iommu->qi->desc);
|
||||
kfree(iommu->qi->desc_status);
|
||||
kfree(iommu->qi);
|
||||
}
|
||||
@ -1755,7 +1755,8 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
|
||||
int dmar_enable_qi(struct intel_iommu *iommu)
|
||||
{
|
||||
struct q_inval *qi;
|
||||
struct page *desc_page;
|
||||
void *desc;
|
||||
int order;
|
||||
|
||||
if (!ecap_qis(iommu->ecap))
|
||||
return -ENOENT;
|
||||
@ -1776,19 +1777,19 @@ int dmar_enable_qi(struct intel_iommu *iommu)
|
||||
* Need two pages to accommodate 256 descriptors of 256 bits each
|
||||
* if the remapping hardware supports scalable mode translation.
|
||||
*/
|
||||
desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
|
||||
!!ecap_smts(iommu->ecap));
|
||||
if (!desc_page) {
|
||||
order = ecap_smts(iommu->ecap) ? 1 : 0;
|
||||
desc = iommu_alloc_pages_node(iommu->node, GFP_ATOMIC, order);
|
||||
if (!desc) {
|
||||
kfree(qi);
|
||||
iommu->qi = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
qi->desc = page_address(desc_page);
|
||||
qi->desc = desc;
|
||||
|
||||
qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC);
|
||||
if (!qi->desc_status) {
|
||||
free_page((unsigned long) qi->desc);
|
||||
iommu_free_page(qi->desc);
|
||||
kfree(qi);
|
||||
iommu->qi = NULL;
|
||||
return -ENOMEM;
|
||||
@ -2122,7 +2123,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int __init enable_drhd_fault_handling(void)
|
||||
int enable_drhd_fault_handling(unsigned int cpu)
|
||||
{
|
||||
struct dmar_drhd_unit *drhd;
|
||||
struct intel_iommu *iommu;
|
||||
@ -2132,7 +2133,12 @@ int __init enable_drhd_fault_handling(void)
|
||||
*/
|
||||
for_each_iommu(iommu, drhd) {
|
||||
u32 fault_status;
|
||||
int ret = dmar_set_interrupt(iommu);
|
||||
int ret;
|
||||
|
||||
if (iommu->irq || iommu->node != cpu_to_node(cpu))
|
||||
continue;
|
||||
|
||||
ret = dmar_set_interrupt(iommu);
|
||||
|
||||
if (ret) {
|
||||
pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include "iommu.h"
|
||||
#include "../dma-iommu.h"
|
||||
#include "../irq_remapping.h"
|
||||
#include "../iommu-pages.h"
|
||||
#include "pasid.h"
|
||||
#include "cap_audit.h"
|
||||
#include "perfmon.h"
|
||||
@ -54,11 +55,6 @@
|
||||
__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
|
||||
#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
|
||||
|
||||
/* IO virtual address start page frame number */
|
||||
#define IOVA_START_PFN (1)
|
||||
|
||||
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
|
||||
|
||||
static void __init check_tylersburg_isoch(void);
|
||||
static int rwbf_quirk;
|
||||
|
||||
@ -221,12 +217,11 @@ int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
|
||||
int intel_iommu_enabled = 0;
|
||||
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
|
||||
|
||||
static int dmar_map_gfx = 1;
|
||||
static int intel_iommu_superpage = 1;
|
||||
static int iommu_identity_mapping;
|
||||
static int iommu_skip_te_disable;
|
||||
static int disable_igfx_iommu;
|
||||
|
||||
#define IDENTMAP_GFX 2
|
||||
#define IDENTMAP_AZALIA 4
|
||||
|
||||
const struct iommu_ops intel_iommu_ops;
|
||||
@ -265,7 +260,7 @@ static int __init intel_iommu_setup(char *str)
|
||||
no_platform_optin = 1;
|
||||
pr_info("IOMMU disabled\n");
|
||||
} else if (!strncmp(str, "igfx_off", 8)) {
|
||||
dmar_map_gfx = 0;
|
||||
disable_igfx_iommu = 1;
|
||||
pr_info("Disable GFX device mapping\n");
|
||||
} else if (!strncmp(str, "forcedac", 8)) {
|
||||
pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
|
||||
@ -298,22 +293,6 @@ static int __init intel_iommu_setup(char *str)
|
||||
}
|
||||
__setup("intel_iommu=", intel_iommu_setup);
|
||||
|
||||
void *alloc_pgtable_page(int node, gfp_t gfp)
|
||||
{
|
||||
struct page *page;
|
||||
void *vaddr = NULL;
|
||||
|
||||
page = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
|
||||
if (page)
|
||||
vaddr = page_address(page);
|
||||
return vaddr;
|
||||
}
|
||||
|
||||
void free_pgtable_page(void *vaddr)
|
||||
{
|
||||
free_page((unsigned long)vaddr);
|
||||
}
|
||||
|
||||
static int domain_type_is_si(struct dmar_domain *domain)
|
||||
{
|
||||
return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
|
||||
@ -545,7 +524,7 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
|
||||
if (!alloc)
|
||||
return NULL;
|
||||
|
||||
context = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
|
||||
context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
|
||||
if (!context)
|
||||
return NULL;
|
||||
|
||||
@ -719,17 +698,17 @@ static void free_context_table(struct intel_iommu *iommu)
|
||||
for (i = 0; i < ROOT_ENTRY_NR; i++) {
|
||||
context = iommu_context_addr(iommu, i, 0, 0);
|
||||
if (context)
|
||||
free_pgtable_page(context);
|
||||
iommu_free_page(context);
|
||||
|
||||
if (!sm_supported(iommu))
|
||||
continue;
|
||||
|
||||
context = iommu_context_addr(iommu, i, 0x80, 0);
|
||||
if (context)
|
||||
free_pgtable_page(context);
|
||||
iommu_free_page(context);
|
||||
}
|
||||
|
||||
free_pgtable_page(iommu->root_entry);
|
||||
iommu_free_page(iommu->root_entry);
|
||||
iommu->root_entry = NULL;
|
||||
}
|
||||
|
||||
@ -865,9 +844,9 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
|
||||
break;
|
||||
|
||||
if (!dma_pte_present(pte)) {
|
||||
uint64_t pteval;
|
||||
uint64_t pteval, tmp;
|
||||
|
||||
tmp_page = alloc_pgtable_page(domain->nid, gfp);
|
||||
tmp_page = iommu_alloc_page_node(domain->nid, gfp);
|
||||
|
||||
if (!tmp_page)
|
||||
return NULL;
|
||||
@ -877,9 +856,10 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
|
||||
if (domain->use_first_level)
|
||||
pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
|
||||
|
||||
if (cmpxchg64(&pte->val, 0ULL, pteval))
|
||||
tmp = 0ULL;
|
||||
if (!try_cmpxchg64(&pte->val, &tmp, pteval))
|
||||
/* Someone else set it while we were thinking; use theirs. */
|
||||
free_pgtable_page(tmp_page);
|
||||
iommu_free_page(tmp_page);
|
||||
else
|
||||
domain_flush_cache(domain, pte, sizeof(*pte));
|
||||
}
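The hunk above converts pfn_to_dma_pte() from cmpxchg64() to try_cmpxchg64(), which takes the expected value by reference, updates it on failure and returns a plain success flag. A userspace analogy using C11 atomics, which follow the same calling convention; this is illustrative only and not the kernel primitive:

/* try_cmpxchg-style compare-and-exchange, userspace analogy. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	_Atomic uint64_t pte_val = 0;
	uint64_t expected = 0;			/* mirrors "tmp = 0ULL;" above */
	uint64_t new_pteval = 0x1000 | 0x3;	/* made-up PTE value */

	if (atomic_compare_exchange_strong(&pte_val, &expected, new_pteval))
		printf("installed new PTE %#llx\n",
		       (unsigned long long)new_pteval);
	else
		printf("lost the race, somebody stored %#llx first\n",
		       (unsigned long long)expected);
	return 0;
}
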
|
||||
@ -992,7 +972,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
|
||||
last_pfn < level_pfn + level_size(level) - 1)) {
|
||||
dma_clear_pte(pte);
|
||||
domain_flush_cache(domain, pte, sizeof(*pte));
|
||||
free_pgtable_page(level_pte);
|
||||
iommu_free_page(level_pte);
|
||||
}
|
||||
next:
|
||||
pfn += level_size(level);
|
||||
@ -1016,7 +996,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
|
||||
|
||||
/* free pgd */
|
||||
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
|
||||
free_pgtable_page(domain->pgd);
|
||||
iommu_free_page(domain->pgd);
|
||||
domain->pgd = NULL;
|
||||
}
|
||||
}
|
||||
@ -1118,7 +1098,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
|
||||
{
|
||||
struct root_entry *root;
|
||||
|
||||
root = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
|
||||
root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC);
|
||||
if (!root) {
|
||||
pr_err("Allocating root entry for %s failed\n",
|
||||
iommu->name);
|
||||
@ -1394,197 +1374,9 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
|
||||
quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
|
||||
}
|
||||
|
||||
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
|
||||
u64 addr, unsigned mask)
|
||||
{
|
||||
struct dev_pasid_info *dev_pasid;
|
||||
struct device_domain_info *info;
|
||||
unsigned long flags;
|
||||
|
||||
if (!domain->has_iotlb_device)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
list_for_each_entry(info, &domain->devices, link)
|
||||
__iommu_flush_dev_iotlb(info, addr, mask);
|
||||
|
||||
list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
|
||||
info = dev_iommu_priv_get(dev_pasid->dev);
|
||||
|
||||
if (!info->ats_enabled)
|
||||
continue;
|
||||
|
||||
qi_flush_dev_iotlb_pasid(info->iommu,
|
||||
PCI_DEVID(info->bus, info->devfn),
|
||||
info->pfsid, dev_pasid->pasid,
|
||||
info->ats_qdep, addr,
|
||||
mask);
|
||||
}
|
||||
spin_unlock_irqrestore(&domain->lock, flags);
|
||||
}
|
||||
|
||||
static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
|
||||
struct dmar_domain *domain, u64 addr,
|
||||
unsigned long npages, bool ih)
|
||||
{
|
||||
u16 did = domain_id_iommu(domain, iommu);
|
||||
struct dev_pasid_info *dev_pasid;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain)
|
||||
qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih);
|
||||
|
||||
if (!list_empty(&domain->devices))
|
||||
qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih);
|
||||
spin_unlock_irqrestore(&domain->lock, flags);
|
||||
}
|
||||
|
||||
static void __iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
|
||||
unsigned long pfn, unsigned int pages,
|
||||
int ih)
|
||||
{
|
||||
unsigned int aligned_pages = __roundup_pow_of_two(pages);
|
||||
unsigned long bitmask = aligned_pages - 1;
|
||||
unsigned int mask = ilog2(aligned_pages);
|
||||
u64 addr = (u64)pfn << VTD_PAGE_SHIFT;
|
||||
|
||||
/*
|
||||
* PSI masks the low order bits of the base address. If the
|
||||
* address isn't aligned to the mask, then compute a mask value
|
||||
* needed to ensure the target range is flushed.
|
||||
*/
|
||||
if (unlikely(bitmask & pfn)) {
|
||||
unsigned long end_pfn = pfn + pages - 1, shared_bits;
|
||||
|
||||
/*
|
||||
* Since end_pfn <= pfn + bitmask, the only way bits
|
||||
* higher than bitmask can differ in pfn and end_pfn is
|
||||
* by carrying. This means after masking out bitmask,
|
||||
* high bits starting with the first set bit in
|
||||
* shared_bits are all equal in both pfn and end_pfn.
|
||||
*/
|
||||
shared_bits = ~(pfn ^ end_pfn) & ~bitmask;
|
||||
mask = shared_bits ? __ffs(shared_bits) : BITS_PER_LONG;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fallback to domain selective flush if no PSI support or
|
||||
* the size is too big.
|
||||
*/
|
||||
if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
|
||||
iommu->flush.flush_iotlb(iommu, did, 0, 0,
|
||||
DMA_TLB_DSI_FLUSH);
|
||||
else
|
||||
iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
|
||||
DMA_TLB_PSI_FLUSH);
|
||||
}
|
||||
|
||||
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
|
||||
struct dmar_domain *domain,
|
||||
unsigned long pfn, unsigned int pages,
|
||||
int ih, int map)
|
||||
{
|
||||
unsigned int aligned_pages = __roundup_pow_of_two(pages);
|
||||
unsigned int mask = ilog2(aligned_pages);
|
||||
uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
|
||||
u16 did = domain_id_iommu(domain, iommu);
|
||||
|
||||
if (WARN_ON(!pages))
|
||||
return;
|
||||
|
||||
if (ih)
|
||||
ih = 1 << 6;
|
||||
|
||||
if (domain->use_first_level)
|
||||
domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
|
||||
else
|
||||
__iommu_flush_iotlb_psi(iommu, did, pfn, pages, ih);
|
||||
|
||||
/*
|
||||
* In caching mode, changes of pages from non-present to present require
|
||||
* flush. However, device IOTLB doesn't need to be flushed in this case.
|
||||
*/
|
||||
if (!cap_caching_mode(iommu->cap) || !map)
|
||||
iommu_flush_dev_iotlb(domain, addr, mask);
|
||||
}
|
||||
|
||||
/* Notification for newly created mappings */
|
||||
static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *domain,
|
||||
unsigned long pfn, unsigned int pages)
|
||||
{
|
||||
/*
|
||||
* It's a non-present to present mapping. Only flush if caching mode
|
||||
* and second level.
|
||||
*/
|
||||
if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
|
||||
iommu_flush_iotlb_psi(iommu, domain, pfn, pages, 0, 1);
|
||||
else
|
||||
iommu_flush_write_buffer(iommu);
|
||||
}
|
||||
|
||||
/*
|
||||
* Flush the relevant caches in nested translation if the domain
|
||||
* also serves as a parent
|
||||
*/
|
||||
static void parent_domain_flush(struct dmar_domain *domain,
|
||||
unsigned long pfn,
|
||||
unsigned long pages, int ih)
|
||||
{
|
||||
struct dmar_domain *s1_domain;
|
||||
|
||||
spin_lock(&domain->s1_lock);
|
||||
list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) {
|
||||
struct device_domain_info *device_info;
|
||||
struct iommu_domain_info *info;
|
||||
unsigned long flags;
|
||||
unsigned long i;
|
||||
|
||||
xa_for_each(&s1_domain->iommu_array, i, info)
|
||||
__iommu_flush_iotlb_psi(info->iommu, info->did,
|
||||
pfn, pages, ih);
|
||||
|
||||
if (!s1_domain->has_iotlb_device)
|
||||
continue;
|
||||
|
||||
spin_lock_irqsave(&s1_domain->lock, flags);
|
||||
list_for_each_entry(device_info, &s1_domain->devices, link)
|
||||
/*
|
||||
* Address translation cache in device side caches the
|
||||
* result of nested translation. There is no easy way
|
||||
* to identify the exact set of nested translations
|
||||
* affected by a change in S2. So just flush the entire
|
||||
* device cache.
|
||||
*/
|
||||
__iommu_flush_dev_iotlb(device_info, 0,
|
||||
MAX_AGAW_PFN_WIDTH);
|
||||
spin_unlock_irqrestore(&s1_domain->lock, flags);
|
||||
}
|
||||
spin_unlock(&domain->s1_lock);
|
||||
}
|
||||
|
||||
static void intel_flush_iotlb_all(struct iommu_domain *domain)
|
||||
{
|
||||
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
|
||||
struct iommu_domain_info *info;
|
||||
unsigned long idx;
|
||||
|
||||
xa_for_each(&dmar_domain->iommu_array, idx, info) {
|
||||
struct intel_iommu *iommu = info->iommu;
|
||||
u16 did = domain_id_iommu(dmar_domain, iommu);
|
||||
|
||||
if (dmar_domain->use_first_level)
|
||||
domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0);
|
||||
else
|
||||
iommu->flush.flush_iotlb(iommu, did, 0, 0,
|
||||
DMA_TLB_DSI_FLUSH);
|
||||
|
||||
if (!cap_caching_mode(iommu->cap))
|
||||
iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
|
||||
}
|
||||
|
||||
if (dmar_domain->nested_parent)
|
||||
parent_domain_flush(dmar_domain, 0, -1, 0);
|
||||
cache_tag_flush_all(to_dmar_domain(domain));
|
||||
}
|
||||
|
||||
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
|
||||
@ -1750,7 +1542,9 @@ static struct dmar_domain *alloc_domain(unsigned int type)
|
||||
domain->has_iotlb_device = false;
|
||||
INIT_LIST_HEAD(&domain->devices);
|
||||
INIT_LIST_HEAD(&domain->dev_pasids);
|
||||
INIT_LIST_HEAD(&domain->cache_tags);
|
||||
spin_lock_init(&domain->lock);
|
||||
spin_lock_init(&domain->cache_lock);
|
||||
xa_init(&domain->iommu_array);
|
||||
|
||||
return domain;
|
||||
@ -1762,6 +1556,9 @@ int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
|
||||
unsigned long ndomains;
|
||||
int num, ret = -ENOSPC;
|
||||
|
||||
if (domain->domain.type == IOMMU_DOMAIN_SVA)
|
||||
return 0;
|
||||
|
||||
info = kzalloc(sizeof(*info), GFP_KERNEL);
|
||||
if (!info)
|
||||
return -ENOMEM;
|
||||
@ -1809,6 +1606,9 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
|
||||
{
|
||||
struct iommu_domain_info *info;
|
||||
|
||||
if (domain->domain.type == IOMMU_DOMAIN_SVA)
|
||||
return;
|
||||
|
||||
spin_lock(&iommu->lock);
|
||||
info = xa_load(&domain->iommu_array, iommu->seq_id);
|
||||
if (--info->refcnt == 0) {
|
||||
@ -1841,7 +1641,7 @@ static void domain_exit(struct dmar_domain *domain)
|
||||
LIST_HEAD(freelist);
|
||||
|
||||
domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
|
||||
put_pages_list(&freelist);
|
||||
iommu_put_pages_list(&freelist);
|
||||
}
|
||||
|
||||
if (WARN_ON(!list_empty(&domain->devices)))
|
||||
@ -1988,13 +1788,6 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
|
||||
domain_context_mapping_cb, domain);
|
||||
}
|
||||
|
||||
/* Returns a number of VTD pages, but aligned to MM page size */
|
||||
static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
|
||||
{
|
||||
host_addr &= ~PAGE_MASK;
|
||||
return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
|
||||
}
|
||||
|
||||
/* Return largest possible superpage level for a given mapping */
|
||||
static int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn,
|
||||
unsigned long phy_pfn, unsigned long pages)
|
||||
@ -2031,9 +1824,7 @@ static void switch_to_super_page(struct dmar_domain *domain,
|
||||
unsigned long end_pfn, int level)
|
||||
{
|
||||
unsigned long lvl_pages = lvl_to_nr_pages(level);
|
||||
struct iommu_domain_info *info;
|
||||
struct dma_pte *pte = NULL;
|
||||
unsigned long i;
|
||||
|
||||
while (start_pfn <= end_pfn) {
|
||||
if (!pte)
|
||||
@ -2045,13 +1836,8 @@ static void switch_to_super_page(struct dmar_domain *domain,
|
||||
start_pfn + lvl_pages - 1,
|
||||
level + 1);
|
||||
|
||||
xa_for_each(&domain->iommu_array, i, info)
|
||||
iommu_flush_iotlb_psi(info->iommu, domain,
|
||||
start_pfn, lvl_pages,
|
||||
0, 0);
|
||||
if (domain->nested_parent)
|
||||
parent_domain_flush(domain, start_pfn,
|
||||
lvl_pages, 0);
|
||||
cache_tag_flush_range(domain, start_pfn << VTD_PAGE_SHIFT,
|
||||
end_pfn << VTD_PAGE_SHIFT, 0);
|
||||
}
|
||||
|
||||
pte++;
|
||||
@ -2128,8 +1914,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
|
||||
/* We don't need lock here, nobody else
|
||||
* touches the iova range
|
||||
*/
|
||||
tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
|
||||
if (tmp) {
|
||||
tmp = 0ULL;
|
||||
if (!try_cmpxchg64_local(&pte->val, &tmp, pteval)) {
|
||||
static int dumps = 5;
|
||||
pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
|
||||
iov_pfn, tmp, (unsigned long long)pteval);
|
||||
@ -2327,6 +2113,13 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
|
||||
ret = domain_attach_iommu(domain, iommu);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
|
||||
if (ret) {
|
||||
domain_detach_iommu(domain, iommu);
|
||||
return ret;
|
||||
}
|
||||
|
||||
info->domain = domain;
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
list_add(&info->link, &domain->devices);
|
||||
@ -2402,9 +2195,6 @@ static int device_def_domain_type(struct device *dev)
|
||||
|
||||
if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
|
||||
return IOMMU_DOMAIN_IDENTITY;
|
||||
|
||||
if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
|
||||
return IOMMU_DOMAIN_IDENTITY;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -2497,7 +2287,7 @@ static int copy_context_table(struct intel_iommu *iommu,
|
||||
if (!old_ce)
|
||||
goto out;
|
||||
|
||||
new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL);
|
||||
new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL);
|
||||
if (!new_ce)
|
||||
goto out_unmap;
|
||||
|
||||
@ -2705,9 +2495,6 @@ static int __init init_dmars(void)
|
||||
iommu_set_root_entry(iommu);
|
||||
}
|
||||
|
||||
if (!dmar_map_gfx)
|
||||
iommu_identity_mapping |= IDENTMAP_GFX;
|
||||
|
||||
check_tylersburg_isoch();
|
||||
|
||||
ret = si_domain_init(hw_pass_through);
|
||||
@ -2798,7 +2585,7 @@ static void __init init_no_remapping_devices(void)
|
||||
/* This IOMMU has *only* gfx devices. Either bypass it or
|
||||
set the gfx_mapped flag, as appropriate */
|
||||
drhd->gfx_dedicated = 1;
|
||||
if (!dmar_map_gfx)
|
||||
if (disable_igfx_iommu)
|
||||
drhd->ignored = 1;
|
||||
}
|
||||
}
|
||||
@ -3414,19 +3201,10 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
|
||||
case MEM_OFFLINE:
|
||||
case MEM_CANCEL_ONLINE:
|
||||
{
|
||||
struct dmar_drhd_unit *drhd;
|
||||
struct intel_iommu *iommu;
|
||||
LIST_HEAD(freelist);
|
||||
|
||||
domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist);
|
||||
|
||||
rcu_read_lock();
|
||||
for_each_active_iommu(iommu, drhd)
|
||||
iommu_flush_iotlb_psi(iommu, si_domain,
|
||||
start_vpfn, mhp->nr_pages,
|
||||
list_empty(&freelist), 0);
|
||||
rcu_read_unlock();
|
||||
put_pages_list(&freelist);
|
||||
iommu_put_pages_list(&freelist);
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -3815,6 +3593,7 @@ void device_block_translation(struct device *dev)
|
||||
list_del(&info->link);
|
||||
spin_unlock_irqrestore(&info->domain->lock, flags);
|
||||
|
||||
cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
|
||||
domain_detach_iommu(info->domain, iommu);
|
||||
info->domain = NULL;
|
||||
}
|
||||
@ -3833,7 +3612,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
|
||||
domain->max_addr = 0;
|
||||
|
||||
/* always allocate the top pgd */
|
||||
domain->pgd = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
|
||||
domain->pgd = iommu_alloc_page_node(domain->nid, GFP_ATOMIC);
|
||||
if (!domain->pgd)
|
||||
return -ENOMEM;
|
||||
domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
|
||||
@ -3882,8 +3661,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
|
||||
return domain;
|
||||
case IOMMU_DOMAIN_IDENTITY:
|
||||
return &si_domain->domain;
|
||||
case IOMMU_DOMAIN_SVA:
|
||||
return intel_svm_domain_alloc();
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
@ -3987,7 +3764,7 @@ int prepare_domain_attach_device(struct iommu_domain *domain,
|
||||
pte = dmar_domain->pgd;
|
||||
if (dma_pte_present(pte)) {
|
||||
dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte));
|
||||
free_pgtable_page(pte);
|
||||
iommu_free_page(pte);
|
||||
}
|
||||
dmar_domain->agaw--;
|
||||
}
|
||||
@ -4122,26 +3899,9 @@ static size_t intel_iommu_unmap_pages(struct iommu_domain *domain,
|
||||
static void intel_iommu_tlb_sync(struct iommu_domain *domain,
|
||||
struct iommu_iotlb_gather *gather)
|
||||
{
|
||||
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
|
||||
unsigned long iova_pfn = IOVA_PFN(gather->start);
|
||||
size_t size = gather->end - gather->start;
|
||||
struct iommu_domain_info *info;
|
||||
unsigned long start_pfn;
|
||||
unsigned long nrpages;
|
||||
unsigned long i;
|
||||
|
||||
nrpages = aligned_nrpages(gather->start, size);
|
||||
start_pfn = mm_to_dma_pfn_start(iova_pfn);
|
||||
|
||||
xa_for_each(&dmar_domain->iommu_array, i, info)
|
||||
iommu_flush_iotlb_psi(info->iommu, dmar_domain,
|
||||
start_pfn, nrpages,
|
||||
list_empty(&gather->freelist), 0);
|
||||
|
||||
if (dmar_domain->nested_parent)
|
||||
parent_domain_flush(dmar_domain, start_pfn, nrpages,
|
||||
list_empty(&gather->freelist));
|
||||
put_pages_list(&gather->freelist);
|
||||
cache_tag_flush_range(to_dmar_domain(domain), gather->start,
|
||||
gather->end, list_empty(&gather->freelist));
|
||||
iommu_put_pages_list(&gather->freelist);
|
||||
}
|
||||
|
||||
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
|
||||
@ -4352,12 +4112,6 @@ static void intel_iommu_release_device(struct device *dev)
|
||||
set_dma_ops(dev, NULL);
|
||||
}
|
||||
|
||||
static void intel_iommu_probe_finalize(struct device *dev)
|
||||
{
|
||||
set_dma_ops(dev, NULL);
|
||||
iommu_setup_dma_ops(dev, 0, U64_MAX);
|
||||
}
|
||||
|
||||
static void intel_iommu_get_resv_regions(struct device *device,
|
||||
struct list_head *head)
|
||||
{
|
||||
@ -4579,41 +4333,20 @@ static bool risky_device(struct pci_dev *pdev)
|
||||
static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
|
||||
unsigned long iova, size_t size)
|
||||
{
|
||||
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
|
||||
unsigned long pages = aligned_nrpages(iova, size);
|
||||
unsigned long pfn = iova >> VTD_PAGE_SHIFT;
|
||||
struct iommu_domain_info *info;
|
||||
unsigned long i;
|
||||
cache_tag_flush_range_np(to_dmar_domain(domain), iova, iova + size - 1);
|
||||
|
||||
xa_for_each(&dmar_domain->iommu_array, i, info)
|
||||
__mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
|
||||
static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
|
||||
struct iommu_domain *domain)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(dev);
|
||||
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
|
||||
struct dev_pasid_info *curr, *dev_pasid = NULL;
|
||||
struct intel_iommu *iommu = info->iommu;
|
||||
struct dmar_domain *dmar_domain;
|
||||
struct iommu_domain *domain;
|
||||
unsigned long flags;
|
||||
|
||||
domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
|
||||
if (WARN_ON_ONCE(!domain))
|
||||
goto out_tear_down;
|
||||
|
||||
/*
|
||||
* The SVA implementation needs to handle its own stuffs like the mm
|
||||
* notification. Before consolidating that code into iommu core, let
|
||||
* the intel sva code handle it.
|
||||
*/
|
||||
if (domain->type == IOMMU_DOMAIN_SVA) {
|
||||
intel_svm_remove_dev_pasid(dev, pasid);
|
||||
goto out_tear_down;
|
||||
}
|
||||
|
||||
dmar_domain = to_dmar_domain(domain);
|
||||
spin_lock_irqsave(&dmar_domain->lock, flags);
|
||||
list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
|
||||
if (curr->dev == dev && curr->pasid == pasid) {
|
||||
@ -4625,10 +4358,10 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
|
||||
WARN_ON_ONCE(!dev_pasid);
|
||||
spin_unlock_irqrestore(&dmar_domain->lock, flags);
|
||||
|
||||
cache_tag_unassign_domain(dmar_domain, dev, pasid);
|
||||
domain_detach_iommu(dmar_domain, iommu);
|
||||
intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
|
||||
kfree(dev_pasid);
|
||||
out_tear_down:
|
||||
intel_pasid_tear_down_entry(iommu, dev, pasid, false);
|
||||
intel_drain_pasid_prq(dev, pasid);
|
||||
}
|
||||
@ -4664,6 +4397,10 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
ret = cache_tag_assign_domain(dmar_domain, dev, pasid);
|
||||
if (ret)
|
||||
goto out_detach_iommu;
|
||||
|
||||
if (domain_type_is_si(dmar_domain))
|
||||
ret = intel_pasid_setup_pass_through(iommu, dev, pasid);
|
||||
else if (dmar_domain->use_first_level)
|
||||
@ -4673,7 +4410,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
|
||||
ret = intel_pasid_setup_second_level(iommu, dmar_domain,
|
||||
dev, pasid);
|
||||
if (ret)
|
||||
goto out_detach_iommu;
|
||||
goto out_unassign_tag;
|
||||
|
||||
dev_pasid->dev = dev;
|
||||
dev_pasid->pasid = pasid;
|
||||
@ -4685,6 +4422,8 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
|
||||
intel_iommu_debugfs_create_dev_pasid(dev_pasid);
|
||||
|
||||
return 0;
|
||||
out_unassign_tag:
|
||||
cache_tag_unassign_domain(dmar_domain, dev, pasid);
|
||||
out_detach_iommu:
|
||||
domain_detach_iommu(dmar_domain, iommu);
|
||||
out_free:
|
||||
@ -4841,8 +4580,8 @@ const struct iommu_ops intel_iommu_ops = {
|
||||
.hw_info = intel_iommu_hw_info,
|
||||
.domain_alloc = intel_iommu_domain_alloc,
|
||||
.domain_alloc_user = intel_iommu_domain_alloc_user,
|
||||
.domain_alloc_sva = intel_svm_domain_alloc,
|
||||
.probe_device = intel_iommu_probe_device,
|
||||
.probe_finalize = intel_iommu_probe_finalize,
|
||||
.release_device = intel_iommu_release_device,
|
||||
.get_resv_regions = intel_iommu_get_resv_regions,
|
||||
.device_group = intel_iommu_device_group,
|
||||
@ -4875,7 +4614,7 @@ static void quirk_iommu_igfx(struct pci_dev *dev)
|
||||
return;
|
||||
|
||||
pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
|
||||
dmar_map_gfx = 0;
|
||||
disable_igfx_iommu = 1;
|
||||
}
|
||||
|
||||
/* G4x/GM45 integrated gfx dmar support is totally busted. */
|
||||
@ -4956,8 +4695,8 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
|
||||
|
||||
if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
|
||||
pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
|
||||
dmar_map_gfx = 0;
|
||||
} else if (dmar_map_gfx) {
|
||||
disable_igfx_iommu = 1;
|
||||
} else if (!disable_igfx_iommu) {
|
||||
/* we have to ensure the gfx device is idle before we flush */
|
||||
pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
|
||||
iommu_set_dma_strict();
|
||||
|
@ -35,6 +35,8 @@
|
||||
#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
|
||||
#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
|
||||
|
||||
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
|
||||
|
||||
#define VTD_STRIDE_SHIFT (9)
|
||||
#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
|
||||
|
||||
@ -455,7 +457,6 @@ enum {
|
||||
|
||||
/* Page group response descriptor QW0 */
|
||||
#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
|
||||
#define QI_PGRP_PDP(p) (((u64)(p)) << 5)
|
||||
#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12)
|
||||
#define QI_PGRP_DID(rid) (((u64)(rid)) << 16)
|
||||
#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
|
||||
@ -607,6 +608,9 @@ struct dmar_domain {
|
||||
struct list_head devices; /* all devices' list */
|
||||
struct list_head dev_pasids; /* all attached pasids */
|
||||
|
||||
spinlock_t cache_lock; /* Protect the cache tag list */
|
||||
struct list_head cache_tags; /* Cache tag list */
|
||||
|
||||
int iommu_superpage;/* Level of superpages supported:
|
||||
0 == 4KiB (no superpages), 1 == 2MiB,
|
||||
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
|
||||
@ -644,6 +648,11 @@ struct dmar_domain {
|
||||
/* link to parent domain siblings */
|
||||
struct list_head s2_link;
|
||||
};
|
||||
|
||||
/* SVA domain */
|
||||
struct {
|
||||
struct mmu_notifier notifier;
|
||||
};
|
||||
};
|
||||
|
||||
struct iommu_domain domain; /* generic domain data structure for
|
||||
@ -1038,6 +1047,19 @@ static inline void context_set_sm_pre(struct context_entry *context)
|
||||
context->lo |= BIT_ULL(4);
|
||||
}
|
||||
|
||||
/* Returns a number of VTD pages, but aligned to MM page size */
|
||||
static inline unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
|
||||
{
|
||||
host_addr &= ~PAGE_MASK;
|
||||
return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
|
||||
}
|
||||
|
||||
/* Return a size from number of VTD pages. */
|
||||
static inline unsigned long nrpages_to_size(unsigned long npages)
|
||||
{
|
||||
return npages << VTD_PAGE_SHIFT;
|
||||
}
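A quick numeric sanity check of the two helpers above (a sketch assuming 4 KiB pages, i.e. PAGE_SHIFT == VTD_PAGE_SHIFT == 12; the values are made up, not taken from the patch):

	aligned_nrpages(0x1234, 0x2000);	/* offset 0x234 + 0x2000, PAGE_ALIGN -> 0x3000, >> 12 -> 3 VT-d pages */
	nrpages_to_size(3);			/* 3 << 12 -> 0x3000 bytes */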
|
||||
|
||||
/* Convert value to context PASID directory size field coding. */
|
||||
#define context_pdts(pds) (((pds) & 0x7) << 9)
|
||||
|
||||
@ -1085,48 +1107,60 @@ void domain_update_iommu_cap(struct dmar_domain *domain);
|
||||
|
||||
int dmar_ir_support(void);
|
||||
|
||||
void *alloc_pgtable_page(int node, gfp_t gfp);
|
||||
void free_pgtable_page(void *vaddr);
|
||||
void iommu_flush_write_buffer(struct intel_iommu *iommu);
|
||||
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
|
||||
const struct iommu_user_data *user_data);
|
||||
struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid);
|
||||
|
||||
enum cache_tag_type {
|
||||
CACHE_TAG_IOTLB,
|
||||
CACHE_TAG_DEVTLB,
|
||||
CACHE_TAG_NESTING_IOTLB,
|
||||
CACHE_TAG_NESTING_DEVTLB,
|
||||
};
|
||||
|
||||
struct cache_tag {
|
||||
struct list_head node;
|
||||
enum cache_tag_type type;
|
||||
struct intel_iommu *iommu;
|
||||
/*
|
||||
* The @dev field represents the location of the cache. For IOTLB, it
|
||||
* resides on the IOMMU hardware. @dev stores the device pointer to
|
||||
* the IOMMU hardware. For DevTLB, it locates in the PCIe endpoint.
|
||||
* @dev stores the device pointer to that endpoint.
|
||||
*/
|
||||
struct device *dev;
|
||||
u16 domain_id;
|
||||
ioasid_t pasid;
|
||||
unsigned int users;
|
||||
};
|
||||
|
||||
int cache_tag_assign_domain(struct dmar_domain *domain,
|
||||
struct device *dev, ioasid_t pasid);
|
||||
void cache_tag_unassign_domain(struct dmar_domain *domain,
|
||||
struct device *dev, ioasid_t pasid);
|
||||
void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
|
||||
unsigned long end, int ih);
|
||||
void cache_tag_flush_all(struct dmar_domain *domain);
|
||||
void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
|
||||
unsigned long end);
|
||||
|
||||
#ifdef CONFIG_INTEL_IOMMU_SVM
|
||||
void intel_svm_check(struct intel_iommu *iommu);
|
||||
int intel_svm_enable_prq(struct intel_iommu *iommu);
|
||||
int intel_svm_finish_prq(struct intel_iommu *iommu);
|
||||
void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
|
||||
struct iommu_page_response *msg);
|
||||
struct iommu_domain *intel_svm_domain_alloc(void);
|
||||
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
|
||||
struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
|
||||
struct mm_struct *mm);
|
||||
void intel_drain_pasid_prq(struct device *dev, u32 pasid);
|
||||
|
||||
struct intel_svm_dev {
|
||||
struct list_head list;
|
||||
struct rcu_head rcu;
|
||||
struct device *dev;
|
||||
struct intel_iommu *iommu;
|
||||
u16 did;
|
||||
u16 sid, qdep;
|
||||
};
|
||||
|
||||
struct intel_svm {
|
||||
struct mmu_notifier notifier;
|
||||
struct mm_struct *mm;
|
||||
u32 pasid;
|
||||
struct list_head devs;
|
||||
};
|
||||
#else
|
||||
static inline void intel_svm_check(struct intel_iommu *iommu) {}
|
||||
static inline void intel_drain_pasid_prq(struct device *dev, u32 pasid) {}
|
||||
static inline struct iommu_domain *intel_svm_domain_alloc(void)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
|
||||
static inline struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
|
||||
struct mm_struct *mm)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -23,6 +23,7 @@
|
||||
|
||||
#include "iommu.h"
|
||||
#include "../irq_remapping.h"
|
||||
#include "../iommu-pages.h"
|
||||
#include "cap_audit.h"
|
||||
|
||||
enum irq_mode {
|
||||
@ -529,7 +530,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
|
||||
struct ir_table *ir_table;
|
||||
struct fwnode_handle *fn;
|
||||
unsigned long *bitmap;
|
||||
struct page *pages;
|
||||
void *ir_table_base;
|
||||
|
||||
if (iommu->ir_table)
|
||||
return 0;
|
||||
@ -538,9 +539,9 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
|
||||
if (!ir_table)
|
||||
return -ENOMEM;
|
||||
|
||||
pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
|
||||
INTR_REMAP_PAGE_ORDER);
|
||||
if (!pages) {
|
||||
ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
|
||||
INTR_REMAP_PAGE_ORDER);
|
||||
if (!ir_table_base) {
|
||||
pr_err("IR%d: failed to allocate pages of order %d\n",
|
||||
iommu->seq_id, INTR_REMAP_PAGE_ORDER);
|
||||
goto out_free_table;
|
||||
@ -575,7 +576,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
|
||||
else
|
||||
iommu->ir_domain->msi_parent_ops = &dmar_msi_parent_ops;
|
||||
|
||||
ir_table->base = page_address(pages);
|
||||
ir_table->base = ir_table_base;
|
||||
ir_table->bitmap = bitmap;
|
||||
iommu->ir_table = ir_table;
|
||||
|
||||
@ -624,7 +625,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
|
||||
out_free_bitmap:
|
||||
bitmap_free(bitmap);
|
||||
out_free_pages:
|
||||
__free_pages(pages, INTR_REMAP_PAGE_ORDER);
|
||||
iommu_free_pages(ir_table_base, INTR_REMAP_PAGE_ORDER);
|
||||
out_free_table:
|
||||
kfree(ir_table);
|
||||
|
||||
@ -645,8 +646,7 @@ static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
|
||||
irq_domain_free_fwnode(fn);
|
||||
iommu->ir_domain = NULL;
|
||||
}
|
||||
free_pages((unsigned long)iommu->ir_table->base,
|
||||
INTR_REMAP_PAGE_ORDER);
|
||||
iommu_free_pages(iommu->ir_table->base, INTR_REMAP_PAGE_ORDER);
|
||||
bitmap_free(iommu->ir_table->bitmap);
|
||||
kfree(iommu->ir_table);
|
||||
iommu->ir_table = NULL;
|
||||
|
@ -52,13 +52,14 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = cache_tag_assign_domain(dmar_domain, dev, IOMMU_NO_PASID);
|
||||
if (ret)
|
||||
goto detach_iommu;
|
||||
|
||||
ret = intel_pasid_setup_nested(iommu, dev,
|
||||
IOMMU_NO_PASID, dmar_domain);
|
||||
if (ret) {
|
||||
domain_detach_iommu(dmar_domain, iommu);
|
||||
dev_err_ratelimited(dev, "Failed to setup pasid entry\n");
|
||||
return ret;
|
||||
}
|
||||
if (ret)
|
||||
goto unassign_tag;
|
||||
|
||||
info->domain = dmar_domain;
|
||||
spin_lock_irqsave(&dmar_domain->lock, flags);
|
||||
@ -68,6 +69,12 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
|
||||
domain_update_iotlb(dmar_domain);
|
||||
|
||||
return 0;
|
||||
unassign_tag:
|
||||
cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID);
|
||||
detach_iommu:
|
||||
domain_detach_iommu(dmar_domain, iommu);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void intel_nested_domain_free(struct iommu_domain *domain)
|
||||
@ -81,50 +88,6 @@ static void intel_nested_domain_free(struct iommu_domain *domain)
|
||||
kfree(dmar_domain);
|
||||
}
|
||||
|
||||
static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
|
||||
unsigned int mask)
|
||||
{
|
||||
struct device_domain_info *info;
|
||||
unsigned long flags;
|
||||
u16 sid, qdep;
|
||||
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
list_for_each_entry(info, &domain->devices, link) {
|
||||
if (!info->ats_enabled)
|
||||
continue;
|
||||
sid = info->bus << 8 | info->devfn;
|
||||
qdep = info->ats_qdep;
|
||||
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
|
||||
qdep, addr, mask);
|
||||
quirk_extra_dev_tlb_flush(info, addr, mask,
|
||||
IOMMU_NO_PASID, qdep);
|
||||
}
|
||||
spin_unlock_irqrestore(&domain->lock, flags);
|
||||
}
|
||||
|
||||
static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
|
||||
u64 npages, bool ih)
|
||||
{
|
||||
struct iommu_domain_info *info;
|
||||
unsigned int mask;
|
||||
unsigned long i;
|
||||
|
||||
xa_for_each(&domain->iommu_array, i, info)
|
||||
qi_flush_piotlb(info->iommu,
|
||||
domain_id_iommu(domain, info->iommu),
|
||||
IOMMU_NO_PASID, addr, npages, ih);
|
||||
|
||||
if (!domain->has_iotlb_device)
|
||||
return;
|
||||
|
||||
if (npages == U64_MAX)
|
||||
mask = 64 - VTD_PAGE_SHIFT;
|
||||
else
|
||||
mask = ilog2(__roundup_pow_of_two(npages));
|
||||
|
||||
nested_flush_dev_iotlb(domain, addr, mask);
|
||||
}
|
||||
|
||||
static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
|
||||
struct iommu_user_data_array *array)
|
||||
{
|
||||
@ -157,9 +120,9 @@ static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
|
||||
break;
|
||||
}
|
||||
|
||||
intel_nested_flush_cache(dmar_domain, inv_entry.addr,
|
||||
inv_entry.npages,
|
||||
inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
|
||||
cache_tag_flush_range(dmar_domain, inv_entry.addr,
|
||||
inv_entry.addr + nrpages_to_size(inv_entry.npages) - 1,
|
||||
inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
|
||||
processed++;
|
||||
}
|
||||
|
||||
@ -206,7 +169,9 @@ struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
|
||||
domain->domain.type = IOMMU_DOMAIN_NESTED;
|
||||
INIT_LIST_HEAD(&domain->devices);
|
||||
INIT_LIST_HEAD(&domain->dev_pasids);
|
||||
INIT_LIST_HEAD(&domain->cache_tags);
|
||||
spin_lock_init(&domain->lock);
|
||||
spin_lock_init(&domain->cache_lock);
|
||||
xa_init(&domain->iommu_array);
|
||||
|
||||
spin_lock(&s2_domain->s1_lock);
|
||||
|
@ -20,6 +20,7 @@
|
||||
|
||||
#include "iommu.h"
|
||||
#include "pasid.h"
|
||||
#include "../iommu-pages.h"
|
||||
|
||||
/*
|
||||
* Intel IOMMU system wide PASID name space:
|
||||
@ -38,7 +39,7 @@ int intel_pasid_alloc_table(struct device *dev)
|
||||
{
|
||||
struct device_domain_info *info;
|
||||
struct pasid_table *pasid_table;
|
||||
struct page *pages;
|
||||
struct pasid_dir_entry *dir;
|
||||
u32 max_pasid = 0;
|
||||
int order, size;
|
||||
|
||||
@ -59,14 +60,13 @@ int intel_pasid_alloc_table(struct device *dev)
|
||||
|
||||
size = max_pasid >> (PASID_PDE_SHIFT - 3);
|
||||
order = size ? get_order(size) : 0;
|
||||
pages = alloc_pages_node(info->iommu->node,
|
||||
GFP_KERNEL | __GFP_ZERO, order);
|
||||
if (!pages) {
|
||||
dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order);
|
||||
if (!dir) {
|
||||
kfree(pasid_table);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pasid_table->table = page_address(pages);
|
||||
pasid_table->table = dir;
|
||||
pasid_table->order = order;
|
||||
pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
|
||||
info->pasid_table = pasid_table;
|
||||
@ -97,10 +97,10 @@ void intel_pasid_free_table(struct device *dev)
|
||||
max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
|
||||
for (i = 0; i < max_pde; i++) {
|
||||
table = get_pasid_table_from_pde(&dir[i]);
|
||||
free_pgtable_page(table);
|
||||
iommu_free_page(table);
|
||||
}
|
||||
|
||||
free_pages((unsigned long)pasid_table->table, pasid_table->order);
|
||||
iommu_free_pages(pasid_table->table, pasid_table->order);
|
||||
kfree(pasid_table);
|
||||
}
|
||||
|
||||
@ -146,7 +146,7 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
|
||||
retry:
|
||||
entries = get_pasid_table_from_pde(&dir[dir_index]);
|
||||
if (!entries) {
|
||||
entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC);
|
||||
entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC);
|
||||
if (!entries)
|
||||
return NULL;
|
||||
|
||||
@ -158,7 +158,7 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
|
||||
*/
|
||||
if (cmpxchg64(&dir[dir_index].val, 0ULL,
|
||||
(u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
|
||||
free_pgtable_page(entries);
|
||||
iommu_free_page(entries);
|
||||
goto retry;
|
||||
}
|
||||
if (!ecap_coherent(info->iommu->ecap)) {
|
||||
|
@ -11,7 +11,6 @@ enum latency_type {
|
||||
DMAR_LATENCY_INV_IOTLB = 0,
|
||||
DMAR_LATENCY_INV_DEVTLB,
|
||||
DMAR_LATENCY_INV_IEC,
|
||||
DMAR_LATENCY_PRQ,
|
||||
DMAR_LATENCY_NUM
|
||||
};
|
||||
|
||||
|
@ -22,57 +22,22 @@
|
||||
#include "iommu.h"
|
||||
#include "pasid.h"
|
||||
#include "perf.h"
|
||||
#include "../iommu-pages.h"
|
||||
#include "trace.h"
|
||||
|
||||
static irqreturn_t prq_event_thread(int irq, void *d);
|
||||
|
||||
static DEFINE_XARRAY_ALLOC(pasid_private_array);
|
||||
static int pasid_private_add(ioasid_t pasid, void *priv)
|
||||
{
|
||||
return xa_alloc(&pasid_private_array, &pasid, priv,
|
||||
XA_LIMIT(pasid, pasid), GFP_ATOMIC);
|
||||
}
|
||||
|
||||
static void pasid_private_remove(ioasid_t pasid)
|
||||
{
|
||||
xa_erase(&pasid_private_array, pasid);
|
||||
}
|
||||
|
||||
static void *pasid_private_find(ioasid_t pasid)
|
||||
{
|
||||
return xa_load(&pasid_private_array, pasid);
|
||||
}
|
||||
|
||||
static struct intel_svm_dev *
|
||||
svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
|
||||
{
|
||||
struct intel_svm_dev *sdev = NULL, *t;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(t, &svm->devs, list) {
|
||||
if (t->dev == dev) {
|
||||
sdev = t;
|
||||
break;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
return sdev;
|
||||
}
|
||||
|
||||
int intel_svm_enable_prq(struct intel_iommu *iommu)
|
||||
{
|
||||
struct iopf_queue *iopfq;
|
||||
struct page *pages;
|
||||
int irq, ret;
|
||||
|
||||
pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
|
||||
if (!pages) {
|
||||
iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
|
||||
if (!iommu->prq) {
|
||||
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
|
||||
iommu->name);
|
||||
return -ENOMEM;
|
||||
}
|
||||
iommu->prq = page_address(pages);
|
||||
|
||||
irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu);
|
||||
if (irq <= 0) {
|
||||
@ -117,7 +82,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
|
||||
dmar_free_hwirq(irq);
|
||||
iommu->pr_irq = 0;
|
||||
free_prq:
|
||||
free_pages((unsigned long)iommu->prq, PRQ_ORDER);
|
||||
iommu_free_pages(iommu->prq, PRQ_ORDER);
|
||||
iommu->prq = NULL;
|
||||
|
||||
return ret;
|
||||
@ -140,7 +105,7 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
|
||||
iommu->iopf_queue = NULL;
|
||||
}
|
||||
|
||||
free_pages((unsigned long)iommu->prq, PRQ_ORDER);
|
||||
iommu_free_pages(iommu->prq, PRQ_ORDER);
|
||||
iommu->prq = NULL;
|
||||
|
||||
return 0;
|
||||
@ -168,94 +133,32 @@ void intel_svm_check(struct intel_iommu *iommu)
|
||||
iommu->flags |= VTD_FLAG_SVM_CAPABLE;
|
||||
}
|
||||
|
||||
static void __flush_svm_range_dev(struct intel_svm *svm,
|
||||
struct intel_svm_dev *sdev,
|
||||
unsigned long address,
|
||||
unsigned long pages, int ih)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);
|
||||
|
||||
if (WARN_ON(!pages))
|
||||
return;
|
||||
|
||||
qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
|
||||
if (info->ats_enabled) {
|
||||
qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
|
||||
svm->pasid, sdev->qdep, address,
|
||||
order_base_2(pages));
|
||||
quirk_extra_dev_tlb_flush(info, address, order_base_2(pages),
|
||||
svm->pasid, sdev->qdep);
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_flush_svm_range_dev(struct intel_svm *svm,
|
||||
struct intel_svm_dev *sdev,
|
||||
unsigned long address,
|
||||
unsigned long pages, int ih)
|
||||
{
|
||||
unsigned long shift = ilog2(__roundup_pow_of_two(pages));
|
||||
unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
|
||||
unsigned long start = ALIGN_DOWN(address, align);
|
||||
unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
|
||||
|
||||
while (start < end) {
|
||||
__flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
|
||||
start += align;
|
||||
}
|
||||
}
|
||||
|
||||
static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
|
||||
unsigned long pages, int ih)
|
||||
{
|
||||
struct intel_svm_dev *sdev;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(sdev, &svm->devs, list)
|
||||
intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static void intel_flush_svm_all(struct intel_svm *svm)
|
||||
{
|
||||
struct device_domain_info *info;
|
||||
struct intel_svm_dev *sdev;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(sdev, &svm->devs, list) {
|
||||
info = dev_iommu_priv_get(sdev->dev);
|
||||
|
||||
qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
|
||||
if (info->ats_enabled) {
|
||||
qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
|
||||
svm->pasid, sdev->qdep,
|
||||
0, 64 - VTD_PAGE_SHIFT);
|
||||
quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
|
||||
svm->pasid, sdev->qdep);
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/* Pages have been freed at this point */
|
||||
static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
|
||||
struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
|
||||
struct dmar_domain *domain = container_of(mn, struct dmar_domain, notifier);
|
||||
|
||||
if (start == 0 && end == -1UL) {
|
||||
intel_flush_svm_all(svm);
|
||||
if (start == 0 && end == ULONG_MAX) {
|
||||
cache_tag_flush_all(domain);
|
||||
return;
|
||||
}
|
||||
|
||||
intel_flush_svm_range(svm, start,
|
||||
(end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
|
||||
/*
|
||||
* The mm_types defines vm_end as the first byte after the end address,
|
||||
* different from IOMMU subsystem using the last address of an address
|
||||
* range.
|
||||
*/
|
||||
cache_tag_flush_range(domain, start, end - 1, 0);
|
||||
}
|
||||
|
||||
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
|
||||
{
|
||||
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
|
||||
struct intel_svm_dev *sdev;
|
||||
struct dmar_domain *domain = container_of(mn, struct dmar_domain, notifier);
|
||||
struct dev_pasid_info *dev_pasid;
|
||||
struct device_domain_info *info;
|
||||
unsigned long flags;
|
||||
|
||||
/* This might end up being called from exit_mmap(), *before* the page
|
||||
* tables are cleared. And __mmu_notifier_release() will delete us from
|
||||
@ -269,157 +172,78 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
|
||||
* page) so that we end up taking a fault that the hardware really
|
||||
* *has* to handle gracefully without affecting other processes.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(sdev, &svm->devs, list)
|
||||
intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
|
||||
svm->pasid, true);
|
||||
rcu_read_unlock();
|
||||
spin_lock_irqsave(&domain->lock, flags);
|
||||
list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
|
||||
info = dev_iommu_priv_get(dev_pasid->dev);
|
||||
intel_pasid_tear_down_entry(info->iommu, dev_pasid->dev,
|
||||
dev_pasid->pasid, true);
|
||||
}
|
||||
spin_unlock_irqrestore(&domain->lock, flags);
|
||||
|
||||
}
|
||||
|
||||
static void intel_mm_free_notifier(struct mmu_notifier *mn)
|
||||
{
|
||||
kfree(container_of(mn, struct dmar_domain, notifier));
|
||||
}
|
||||
|
||||
static const struct mmu_notifier_ops intel_mmuops = {
|
||||
.release = intel_mm_release,
|
||||
.arch_invalidate_secondary_tlbs = intel_arch_invalidate_secondary_tlbs,
|
||||
.free_notifier = intel_mm_free_notifier,
|
||||
};
|
||||
|
||||
static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
|
||||
struct intel_svm **rsvm,
|
||||
struct intel_svm_dev **rsdev)
|
||||
{
|
||||
struct intel_svm_dev *sdev = NULL;
|
||||
struct intel_svm *svm;
|
||||
|
||||
if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
svm = pasid_private_find(pasid);
|
||||
if (IS_ERR(svm))
|
||||
return PTR_ERR(svm);
|
||||
|
||||
if (!svm)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* If we found svm for the PASID, there must be at least one device
|
||||
* bond.
|
||||
*/
|
||||
if (WARN_ON(list_empty(&svm->devs)))
|
||||
return -EINVAL;
|
||||
sdev = svm_lookup_device_by_dev(svm, dev);
|
||||
|
||||
out:
|
||||
*rsvm = svm;
|
||||
*rsdev = sdev;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
|
||||
struct device *dev, ioasid_t pasid)
|
||||
{
|
||||
struct device_domain_info *info = dev_iommu_priv_get(dev);
|
||||
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
|
||||
struct intel_iommu *iommu = info->iommu;
|
||||
struct mm_struct *mm = domain->mm;
|
||||
struct intel_svm_dev *sdev;
|
||||
struct intel_svm *svm;
|
||||
struct dev_pasid_info *dev_pasid;
|
||||
unsigned long sflags;
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
svm = pasid_private_find(pasid);
|
||||
if (!svm) {
|
||||
svm = kzalloc(sizeof(*svm), GFP_KERNEL);
|
||||
if (!svm)
|
||||
return -ENOMEM;
|
||||
dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL);
|
||||
if (!dev_pasid)
|
||||
return -ENOMEM;
|
||||
|
||||
svm->pasid = pasid;
|
||||
svm->mm = mm;
|
||||
INIT_LIST_HEAD_RCU(&svm->devs);
|
||||
dev_pasid->dev = dev;
|
||||
dev_pasid->pasid = pasid;
|
||||
|
||||
svm->notifier.ops = &intel_mmuops;
|
||||
ret = mmu_notifier_register(&svm->notifier, mm);
|
||||
if (ret) {
|
||||
kfree(svm);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = pasid_private_add(svm->pasid, svm);
|
||||
if (ret) {
|
||||
mmu_notifier_unregister(&svm->notifier, mm);
|
||||
kfree(svm);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
|
||||
if (!sdev) {
|
||||
ret = -ENOMEM;
|
||||
goto free_svm;
|
||||
}
|
||||
|
||||
sdev->dev = dev;
|
||||
sdev->iommu = iommu;
|
||||
sdev->did = FLPT_DEFAULT_DID;
|
||||
sdev->sid = PCI_DEVID(info->bus, info->devfn);
|
||||
if (info->ats_enabled) {
|
||||
sdev->qdep = info->ats_qdep;
|
||||
if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
|
||||
sdev->qdep = 0;
|
||||
}
|
||||
ret = cache_tag_assign_domain(to_dmar_domain(domain), dev, pasid);
|
||||
if (ret)
|
||||
goto free_dev_pasid;
|
||||
|
||||
/* Setup the pasid table: */
|
||||
sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
|
||||
ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid,
|
||||
FLPT_DEFAULT_DID, sflags);
|
||||
if (ret)
|
||||
goto free_sdev;
|
||||
goto unassign_tag;
|
||||
|
||||
list_add_rcu(&sdev->list, &svm->devs);
|
||||
spin_lock_irqsave(&dmar_domain->lock, flags);
|
||||
list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids);
|
||||
spin_unlock_irqrestore(&dmar_domain->lock, flags);
|
||||
|
||||
return 0;
|
||||
|
||||
free_sdev:
|
||||
kfree(sdev);
|
||||
free_svm:
|
||||
if (list_empty(&svm->devs)) {
|
||||
mmu_notifier_unregister(&svm->notifier, mm);
|
||||
pasid_private_remove(pasid);
|
||||
kfree(svm);
|
||||
}
|
||||
unassign_tag:
|
||||
cache_tag_unassign_domain(to_dmar_domain(domain), dev, pasid);
|
||||
free_dev_pasid:
|
||||
kfree(dev_pasid);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
|
||||
{
|
||||
struct intel_svm_dev *sdev;
|
||||
struct intel_svm *svm;
|
||||
struct mm_struct *mm;
|
||||
|
||||
if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
|
||||
return;
|
||||
mm = svm->mm;
|
||||
|
||||
if (sdev) {
|
||||
list_del_rcu(&sdev->list);
|
||||
kfree_rcu(sdev, rcu);
|
||||
|
||||
if (list_empty(&svm->devs)) {
|
||||
if (svm->notifier.ops)
|
||||
mmu_notifier_unregister(&svm->notifier, mm);
|
||||
pasid_private_remove(svm->pasid);
|
||||
kfree(svm);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Page request queue descriptor */
|
||||
struct page_req_dsc {
|
||||
union {
|
||||
struct {
|
||||
u64 type:8;
|
||||
u64 pasid_present:1;
|
||||
u64 priv_data_present:1;
|
||||
u64 rsvd:6;
|
||||
u64 rsvd:7;
|
||||
u64 rid:16;
|
||||
u64 pasid:20;
|
||||
u64 exe_req:1;
|
||||
@ -438,7 +262,8 @@ struct page_req_dsc {
|
||||
};
|
||||
u64 qw_1;
|
||||
};
|
||||
u64 priv_data[2];
|
||||
u64 qw_2;
|
||||
u64 qw_3;
|
||||
};
|
||||
|
||||
static bool is_canonical_address(u64 addr)
|
||||
@ -572,24 +397,6 @@ static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
|
||||
event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
|
||||
event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
|
||||
}
|
||||
if (desc->priv_data_present) {
|
||||
/*
|
||||
* Set last page in group bit if private data is present,
|
||||
* page response is required as it does for LPIG.
|
||||
* iommu_report_device_fault() doesn't understand this vendor
|
||||
* specific requirement thus we set last_page as a workaround.
|
||||
*/
|
||||
event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
|
||||
event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
|
||||
event.fault.prm.private_data[0] = desc->priv_data[0];
|
||||
event.fault.prm.private_data[1] = desc->priv_data[1];
|
||||
} else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) {
|
||||
/*
|
||||
* If the private data fields are not used by hardware, use it
|
||||
* to monitor the prq handle latency.
|
||||
*/
|
||||
event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
|
||||
}
|
||||
|
||||
iommu_report_device_fault(dev, &event);
|
||||
}
|
||||
@ -597,39 +404,23 @@ static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
|
||||
static void handle_bad_prq_event(struct intel_iommu *iommu,
|
||||
struct page_req_dsc *req, int result)
|
||||
{
|
||||
struct qi_desc desc;
|
||||
struct qi_desc desc = { };
|
||||
|
||||
pr_err("%s: Invalid page request: %08llx %08llx\n",
|
||||
iommu->name, ((unsigned long long *)req)[0],
|
||||
((unsigned long long *)req)[1]);
|
||||
|
||||
/*
|
||||
* Per VT-d spec. v3.0 ch7.7, system software must
|
||||
* respond with page group response if private data
|
||||
* is present (PDP) or last page in group (LPIG) bit
|
||||
* is set. This is an additional VT-d feature beyond
|
||||
* PCI ATS spec.
|
||||
*/
|
||||
if (!req->lpig && !req->priv_data_present)
|
||||
if (!req->lpig)
|
||||
return;
|
||||
|
||||
desc.qw0 = QI_PGRP_PASID(req->pasid) |
|
||||
QI_PGRP_DID(req->rid) |
|
||||
QI_PGRP_PASID_P(req->pasid_present) |
|
||||
QI_PGRP_PDP(req->priv_data_present) |
|
||||
QI_PGRP_RESP_CODE(result) |
|
||||
QI_PGRP_RESP_TYPE;
|
||||
desc.qw1 = QI_PGRP_IDX(req->prg_index) |
|
||||
QI_PGRP_LPIG(req->lpig);
|
||||
|
||||
if (req->priv_data_present) {
|
||||
desc.qw2 = req->priv_data[0];
|
||||
desc.qw3 = req->priv_data[1];
|
||||
} else {
|
||||
desc.qw2 = 0;
|
||||
desc.qw3 = 0;
|
||||
}
|
||||
|
||||
qi_submit_sync(iommu, &desc, 1, 0);
|
||||
}
|
||||
|
||||
@ -697,7 +488,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
|
||||
|
||||
intel_svm_prq_report(iommu, dev, req);
|
||||
trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
|
||||
req->priv_data[0], req->priv_data[1],
|
||||
req->qw_2, req->qw_3,
|
||||
iommu->prq_seq_number++);
|
||||
mutex_unlock(&iommu->iopf_lock);
|
||||
prq_advance:
|
||||
@ -736,7 +527,7 @@ void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
|
||||
struct intel_iommu *iommu = info->iommu;
|
||||
u8 bus = info->bus, devfn = info->devfn;
|
||||
struct iommu_fault_page_request *prm;
|
||||
bool private_present;
|
||||
struct qi_desc desc;
|
||||
bool pasid_present;
|
||||
bool last_page;
|
||||
u16 sid;
|
||||
@ -744,42 +535,25 @@ void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
|
||||
prm = &evt->fault.prm;
|
||||
sid = PCI_DEVID(bus, devfn);
|
||||
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
|
||||
private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
|
||||
last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
|
||||
|
||||
/*
|
||||
* Per VT-d spec. v3.0 ch7.7, system software must respond
|
||||
* with page group response if private data is present (PDP)
|
||||
* or last page in group (LPIG) bit is set. This is an
|
||||
* additional VT-d requirement beyond PCI ATS spec.
|
||||
*/
|
||||
if (last_page || private_present) {
|
||||
struct qi_desc desc;
|
||||
desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
|
||||
QI_PGRP_PASID_P(pasid_present) |
|
||||
QI_PGRP_RESP_CODE(msg->code) |
|
||||
QI_PGRP_RESP_TYPE;
|
||||
desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
|
||||
desc.qw2 = 0;
|
||||
desc.qw3 = 0;
|
||||
|
||||
desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
|
||||
QI_PGRP_PASID_P(pasid_present) |
|
||||
QI_PGRP_PDP(private_present) |
|
||||
QI_PGRP_RESP_CODE(msg->code) |
|
||||
QI_PGRP_RESP_TYPE;
|
||||
desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
|
||||
desc.qw2 = 0;
|
||||
desc.qw3 = 0;
|
||||
|
||||
if (private_present) {
|
||||
desc.qw2 = prm->private_data[0];
|
||||
desc.qw3 = prm->private_data[1];
|
||||
} else if (prm->private_data[0]) {
|
||||
dmar_latency_update(iommu, DMAR_LATENCY_PRQ,
|
||||
ktime_to_ns(ktime_get()) - prm->private_data[0]);
|
||||
}
|
||||
|
||||
qi_submit_sync(iommu, &desc, 1, 0);
|
||||
}
|
||||
qi_submit_sync(iommu, &desc, 1, 0);
|
||||
}
|
||||
|
||||
static void intel_svm_domain_free(struct iommu_domain *domain)
|
||||
{
|
||||
kfree(to_dmar_domain(domain));
|
||||
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
|
||||
|
||||
/* dmar_domain free is deferred to the mmu free_notifier callback. */
|
||||
mmu_notifier_put(&dmar_domain->notifier);
|
||||
}
|
||||
|
||||
static const struct iommu_domain_ops intel_svm_domain_ops = {
|
||||
@ -787,14 +561,29 @@ static const struct iommu_domain_ops intel_svm_domain_ops = {
|
||||
.free = intel_svm_domain_free
|
||||
};
|
||||
|
||||
struct iommu_domain *intel_svm_domain_alloc(void)
|
||||
struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
|
||||
struct mm_struct *mm)
|
||||
{
|
||||
struct dmar_domain *domain;
|
||||
int ret;
|
||||
|
||||
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
|
||||
if (!domain)
|
||||
return NULL;
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
domain->domain.ops = &intel_svm_domain_ops;
|
||||
domain->use_first_level = true;
|
||||
INIT_LIST_HEAD(&domain->dev_pasids);
|
||||
INIT_LIST_HEAD(&domain->cache_tags);
|
||||
spin_lock_init(&domain->cache_lock);
|
||||
spin_lock_init(&domain->lock);
|
||||
|
||||
domain->notifier.ops = &intel_mmuops;
|
||||
ret = mmu_notifier_register(&domain->notifier, mm);
|
||||
if (ret) {
|
||||
kfree(domain);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
return &domain->domain;
|
||||
}
|
||||
|
@ -89,6 +89,103 @@ TRACE_EVENT(prq_report,
|
||||
__entry->dw1, __entry->dw2, __entry->dw3)
|
||||
)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(cache_tag_log,
|
||||
TP_PROTO(struct cache_tag *tag),
|
||||
TP_ARGS(tag),
|
||||
TP_STRUCT__entry(
|
||||
__string(iommu, tag->iommu->name)
|
||||
__string(dev, dev_name(tag->dev))
|
||||
__field(u16, type)
|
||||
__field(u16, domain_id)
|
||||
__field(u32, pasid)
|
||||
__field(u32, users)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__assign_str(iommu, tag->iommu->name);
|
||||
__assign_str(dev, dev_name(tag->dev));
|
||||
__entry->type = tag->type;
|
||||
__entry->domain_id = tag->domain_id;
|
||||
__entry->pasid = tag->pasid;
|
||||
__entry->users = tag->users;
|
||||
),
|
||||
TP_printk("%s/%s type %s did %d pasid %d ref %d",
|
||||
__get_str(iommu), __get_str(dev),
|
||||
__print_symbolic(__entry->type,
|
||||
{ CACHE_TAG_IOTLB, "iotlb" },
|
||||
{ CACHE_TAG_DEVTLB, "devtlb" },
|
||||
{ CACHE_TAG_NESTING_IOTLB, "nesting_iotlb" },
|
||||
{ CACHE_TAG_NESTING_DEVTLB, "nesting_devtlb" }),
|
||||
__entry->domain_id, __entry->pasid, __entry->users
|
||||
)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(cache_tag_log, cache_tag_assign,
|
||||
TP_PROTO(struct cache_tag *tag),
|
||||
TP_ARGS(tag)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
|
||||
TP_PROTO(struct cache_tag *tag),
|
||||
TP_ARGS(tag)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(cache_tag_log, cache_tag_flush_all,
|
||||
TP_PROTO(struct cache_tag *tag),
|
||||
TP_ARGS(tag)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(cache_tag_flush,
|
||||
TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
|
||||
unsigned long addr, unsigned long pages, unsigned long mask),
|
||||
TP_ARGS(tag, start, end, addr, pages, mask),
|
||||
TP_STRUCT__entry(
|
||||
__string(iommu, tag->iommu->name)
|
||||
__string(dev, dev_name(tag->dev))
|
||||
__field(u16, type)
|
||||
__field(u16, domain_id)
|
||||
__field(u32, pasid)
|
||||
__field(unsigned long, start)
|
||||
__field(unsigned long, end)
|
||||
__field(unsigned long, addr)
|
||||
__field(unsigned long, pages)
|
||||
__field(unsigned long, mask)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__assign_str(iommu, tag->iommu->name);
|
||||
__assign_str(dev, dev_name(tag->dev));
|
||||
__entry->type = tag->type;
|
||||
__entry->domain_id = tag->domain_id;
|
||||
__entry->pasid = tag->pasid;
|
||||
__entry->start = start;
|
||||
__entry->end = end;
|
||||
__entry->addr = addr;
|
||||
__entry->pages = pages;
|
||||
__entry->mask = mask;
|
||||
),
|
||||
TP_printk("%s %s[%d] type %s did %d [0x%lx-0x%lx] addr 0x%lx pages 0x%lx mask 0x%lx",
|
||||
__get_str(iommu), __get_str(dev), __entry->pasid,
|
||||
__print_symbolic(__entry->type,
|
||||
{ CACHE_TAG_IOTLB, "iotlb" },
|
||||
{ CACHE_TAG_DEVTLB, "devtlb" },
|
||||
{ CACHE_TAG_NESTING_IOTLB, "nesting_iotlb" },
|
||||
{ CACHE_TAG_NESTING_DEVTLB, "nesting_devtlb" }),
|
||||
__entry->domain_id, __entry->start, __entry->end,
|
||||
__entry->addr, __entry->pages, __entry->mask
|
||||
)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range,
|
||||
TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
|
||||
unsigned long addr, unsigned long pages, unsigned long mask),
|
||||
TP_ARGS(tag, start, end, addr, pages, mask)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(cache_tag_flush, cache_tag_flush_range_np,
|
||||
TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
|
||||
unsigned long addr, unsigned long pages, unsigned long mask),
|
||||
TP_ARGS(tag, start, end, addr, pages, mask)
|
||||
);
|
||||
#endif /* _TRACE_INTEL_IOMMU_H */
|
||||
|
||||
/* This part must be outside protection */
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include <asm/barrier.h>
|
||||
|
||||
#include "io-pgtable-arm.h"
|
||||
#include "iommu-pages.h"
|
||||
|
||||
#define ARM_LPAE_MAX_ADDR_BITS 52
|
||||
#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
|
||||
@ -198,14 +199,10 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
|
||||
|
||||
VM_BUG_ON((gfp & __GFP_HIGHMEM));
|
||||
|
||||
if (cfg->alloc) {
|
||||
if (cfg->alloc)
|
||||
pages = cfg->alloc(cookie, size, gfp);
|
||||
} else {
|
||||
struct page *p;
|
||||
|
||||
p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
|
||||
pages = p ? page_address(p) : NULL;
|
||||
}
|
||||
else
|
||||
pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);
|
||||
|
||||
if (!pages)
|
||||
return NULL;
|
||||
@ -233,7 +230,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
|
||||
if (cfg->free)
|
||||
cfg->free(cookie, pages, size);
|
||||
else
|
||||
free_pages((unsigned long)pages, order);
|
||||
iommu_free_pages(pages, order);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
@ -249,7 +246,7 @@ static void __arm_lpae_free_pages(void *pages, size_t size,
|
||||
if (cfg->free)
|
||||
cfg->free(cookie, pages, size);
|
||||
else
|
||||
free_pages((unsigned long)pages, get_order(size));
|
||||
iommu_free_pages(pages, get_order(size));
|
||||
}
|
||||
|
||||
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <asm/barrier.h>
|
||||
#include "iommu-pages.h"
|
||||
|
||||
#define DART1_MAX_ADDR_BITS 36
|
||||
|
||||
@ -106,18 +107,12 @@ static phys_addr_t iopte_to_paddr(dart_iopte pte,
|
||||
return paddr;
|
||||
}
|
||||
|
||||
static void *__dart_alloc_pages(size_t size, gfp_t gfp,
|
||||
struct io_pgtable_cfg *cfg)
|
||||
static void *__dart_alloc_pages(size_t size, gfp_t gfp)
|
||||
{
|
||||
int order = get_order(size);
|
||||
struct page *p;
|
||||
|
||||
VM_BUG_ON((gfp & __GFP_HIGHMEM));
|
||||
p = alloc_pages(gfp | __GFP_ZERO, order);
|
||||
if (!p)
|
||||
return NULL;
|
||||
|
||||
return page_address(p);
|
||||
return iommu_alloc_pages(gfp, order);
|
||||
}
|
||||
|
||||
static int dart_init_pte(struct dart_io_pgtable *data,
|
||||
@ -262,13 +257,13 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
|
||||
|
||||
/* no L2 table present */
|
||||
if (!pte) {
|
||||
cptep = __dart_alloc_pages(tblsz, gfp, cfg);
|
||||
cptep = __dart_alloc_pages(tblsz, gfp);
|
||||
if (!cptep)
|
||||
return -ENOMEM;
|
||||
|
||||
pte = dart_install_table(cptep, ptep, 0, data);
|
||||
if (pte)
|
||||
free_pages((unsigned long)cptep, get_order(tblsz));
|
||||
iommu_free_pages(cptep, get_order(tblsz));
|
||||
|
||||
/* L2 table is present (now) */
|
||||
pte = READ_ONCE(*ptep);
|
||||
@ -419,8 +414,7 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
|
||||
cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
|
||||
|
||||
for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
|
||||
data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL,
|
||||
cfg);
|
||||
data->pgd[i] = __dart_alloc_pages(DART_GRANULE(data), GFP_KERNEL);
|
||||
if (!data->pgd[i])
|
||||
goto out_free_data;
|
||||
cfg->apple_dart_cfg.ttbr[i] = virt_to_phys(data->pgd[i]);
|
||||
@ -429,9 +423,10 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
|
||||
return &data->iop;
|
||||
|
||||
out_free_data:
|
||||
while (--i >= 0)
|
||||
free_pages((unsigned long)data->pgd[i],
|
||||
get_order(DART_GRANULE(data)));
|
||||
while (--i >= 0) {
|
||||
iommu_free_pages(data->pgd[i],
|
||||
get_order(DART_GRANULE(data)));
|
||||
}
|
||||
kfree(data);
|
||||
return NULL;
|
||||
}
|
||||
@ -439,6 +434,7 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
|
||||
static void apple_dart_free_pgtable(struct io_pgtable *iop)
|
||||
{
|
||||
struct dart_io_pgtable *data = io_pgtable_to_data(iop);
|
||||
int order = get_order(DART_GRANULE(data));
|
||||
dart_iopte *ptep, *end;
|
||||
int i;
|
||||
|
||||
@ -449,15 +445,10 @@ static void apple_dart_free_pgtable(struct io_pgtable *iop)
|
||||
while (ptep != end) {
|
||||
dart_iopte pte = *ptep++;
|
||||
|
||||
if (pte) {
|
||||
unsigned long page =
|
||||
(unsigned long)iopte_deref(pte, data);
|
||||
|
||||
free_pages(page, get_order(DART_GRANULE(data)));
|
||||
}
|
||||
if (pte)
|
||||
iommu_free_pages(iopte_deref(pte, data), order);
|
||||
}
|
||||
free_pages((unsigned long)data->pgd[i],
|
||||
get_order(DART_GRANULE(data)));
|
||||
iommu_free_pages(data->pgd[i], order);
|
||||
}
|
||||
|
||||
kfree(data);
|
||||
|
drivers/iommu/iommu-pages.h (new file, 186 lines)
@ -0,0 +1,186 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2024, Google LLC.
|
||||
* Pasha Tatashin <pasha.tatashin@soleen.com>
|
||||
*/
|
||||
|
||||
#ifndef __IOMMU_PAGES_H
|
||||
#define __IOMMU_PAGES_H
|
||||
|
||||
#include <linux/vmstat.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
/*
|
||||
* All page allocations that should be reported as "iommu-pagetables" to
|
||||
* userspace must use one of the functions below. This includes allocations of
|
||||
* page-tables and other per-iommu_domain configuration structures.
|
||||
*
|
||||
* This is necessary for the proper accounting as IOMMU state can be rather
|
||||
* large, i.e. multiple gigabytes in size.
|
||||
*/
|
||||
|
||||
/**
|
||||
* __iommu_alloc_account - account for newly allocated page.
|
||||
* @page: head struct page of the page.
|
||||
* @order: order of the page
|
||||
*/
|
||||
static inline void __iommu_alloc_account(struct page *page, int order)
|
||||
{
|
||||
const long pgcnt = 1l << order;
|
||||
|
||||
mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt);
|
||||
mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, pgcnt);
|
||||
}
|
||||
|
||||
/**
|
||||
* __iommu_free_account - account a page that is about to be freed.
|
||||
* @page: head struct page of the page.
|
||||
* @order: order of the page
|
||||
*/
|
||||
static inline void __iommu_free_account(struct page *page, int order)
|
||||
{
|
||||
const long pgcnt = 1l << order;
|
||||
|
||||
mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt);
|
||||
mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, -pgcnt);
|
||||
}
|
||||
|
||||
/**
|
||||
* __iommu_alloc_pages - allocate a zeroed page of a given order.
|
||||
* @gfp: buddy allocator flags
|
||||
* @order: page order
|
||||
*
|
||||
* returns the head struct page of the allocated page.
|
||||
*/
|
||||
static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order)
|
||||
{
|
||||
struct page *page;
|
||||
|
||||
page = alloc_pages(gfp | __GFP_ZERO, order);
|
||||
if (unlikely(!page))
|
||||
return NULL;
|
||||
|
||||
__iommu_alloc_account(page, order);
|
||||
|
||||
return page;
|
||||
}
|
||||
|
||||
/**
|
||||
* __iommu_free_pages - free page of a given order
|
||||
* @page: head struct page of the page
|
||||
* @order: page order
|
||||
*/
|
||||
static inline void __iommu_free_pages(struct page *page, int order)
|
||||
{
|
||||
if (!page)
|
||||
return;
|
||||
|
||||
__iommu_free_account(page, order);
|
||||
__free_pages(page, order);
|
||||
}
|
||||
|
||||
/**
|
||||
* iommu_alloc_pages_node - allocate a zeroed page of a given order from
|
||||
* specific NUMA node.
|
||||
* @nid: memory NUMA node id
|
||||
* @gfp: buddy allocator flags
|
||||
* @order: page order
|
||||
*
|
||||
* returns the virtual address of the allocated page
|
||||
*/
|
||||
static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
|
||||
{
|
||||
struct page *page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);
|
||||
|
||||
if (unlikely(!page))
|
||||
return NULL;
|
||||
|
||||
__iommu_alloc_account(page, order);
|
||||
|
||||
return page_address(page);
|
||||
}
|
||||
|
||||
/**
|
||||
* iommu_alloc_pages - allocate a zeroed page of a given order
|
||||
* @gfp: buddy allocator flags
|
||||
* @order: page order
|
||||
*
|
||||
* returns the virtual address of the allocated page
|
||||
*/
|
||||
static inline void *iommu_alloc_pages(gfp_t gfp, int order)
|
||||
{
|
||||
struct page *page = __iommu_alloc_pages(gfp, order);
|
||||
|
||||
if (unlikely(!page))
|
||||
return NULL;
|
||||
|
||||
return page_address(page);
|
||||
}
|
||||
|
||||
/**
|
||||
* iommu_alloc_page_node - allocate a zeroed page at specific NUMA node.
|
||||
* @nid: memory NUMA node id
|
||||
* @gfp: buddy allocator flags
|
||||
*
|
||||
* returns the virtual address of the allocated page
|
||||
*/
|
||||
static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
|
||||
{
|
||||
return iommu_alloc_pages_node(nid, gfp, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* iommu_alloc_page - allocate a zeroed page
|
||||
* @gfp: buddy allocator flags
|
||||
*
|
||||
* returns the virtual address of the allocated page
|
||||
*/
|
||||
static inline void *iommu_alloc_page(gfp_t gfp)
|
||||
{
|
||||
return iommu_alloc_pages(gfp, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* iommu_free_pages - free page of a given order
|
||||
* @virt: virtual address of the page to be freed.
|
||||
* @order: page order
|
||||
*/
|
||||
static inline void iommu_free_pages(void *virt, int order)
|
||||
{
|
||||
if (!virt)
|
||||
return;
|
||||
|
||||
__iommu_free_pages(virt_to_page(virt), order);
|
||||
}
|
||||
|
||||
/**
|
||||
* iommu_free_page - free page
|
||||
* @virt: virtual address of the page to be freed.
|
||||
*/
|
||||
static inline void iommu_free_page(void *virt)
|
||||
{
|
||||
iommu_free_pages(virt, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* iommu_put_pages_list - free a list of pages.
|
||||
* @page: the head of the lru list to be freed.
|
||||
*
|
||||
* There are no locking requirements for these pages, as they are going to be
|
||||
* put on a free list as soon as refcount reaches 0. Pages are put on this LRU
|
||||
* list once they are removed from the IOMMU page tables. However, they can
|
||||
* still be accessed through debugfs.
|
||||
*/
|
||||
static inline void iommu_put_pages_list(struct list_head *page)
|
||||
{
|
||||
while (!list_empty(page)) {
|
||||
struct page *p = list_entry(page->prev, struct page, lru);
|
||||
|
||||
list_del(&p->lru);
|
||||
__iommu_free_account(p, 0);
|
||||
put_page(p);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* __IOMMU_PAGES_H */
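The driver conversions elsewhere in this series all follow the same shape as these helpers: a raw alloc_pages_node()/page_address() pair becomes iommu_alloc_pages_node(), and the matching free_pages() cast becomes iommu_free_pages(), so the memory shows up in the new accounting. A hedged before/after sketch (nid, order and table are placeholder names):

	#include "iommu-pages.h"

	/* before: zeroed buddy allocation, no IOMMU accounting */
	struct page *pages = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, order);
	void *table = pages ? page_address(pages) : NULL;
	/* ... */
	free_pages((unsigned long)table, order);

	/* after: iommu_alloc_pages_node() zeroes and accounts the pages */
	void *table = iommu_alloc_pages_node(nid, GFP_KERNEL, order);
	/* ... */
	iommu_free_pages(table, order);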
|
@ -108,8 +108,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
|
||||
|
||||
/* Allocate a new domain and set it on device pasid. */
|
||||
domain = iommu_sva_domain_alloc(dev, mm);
|
||||
if (!domain) {
|
||||
ret = -ENOMEM;
|
||||
if (IS_ERR(domain)) {
|
||||
ret = PTR_ERR(domain);
|
||||
goto out_free_handle;
|
||||
}
|
||||
|
||||
@ -283,9 +283,15 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
|
||||
const struct iommu_ops *ops = dev_iommu_ops(dev);
|
||||
struct iommu_domain *domain;
|
||||
|
||||
domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
|
||||
if (!domain)
|
||||
return NULL;
|
||||
if (ops->domain_alloc_sva) {
|
||||
domain = ops->domain_alloc_sva(dev, mm);
|
||||
if (IS_ERR(domain))
|
||||
return domain;
|
||||
} else {
|
||||
domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
|
||||
if (!domain)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
domain->type = IOMMU_DOMAIN_SVA;
|
||||
mmgrab(mm);
|
||||
|
@ -581,10 +581,11 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
|
||||
if (list_empty(&group->entry))
|
||||
list_add_tail(&group->entry, group_list);
|
||||
}
|
||||
mutex_unlock(&group->mutex);
|
||||
|
||||
if (dev_is_pci(dev))
|
||||
iommu_dma_set_pci_32bit_workaround(dev);
|
||||
if (group->default_domain)
|
||||
iommu_setup_dma_ops(dev);
|
||||
|
||||
mutex_unlock(&group->mutex);
|
||||
|
||||
return 0;
|
||||
|
||||
@ -1828,6 +1829,8 @@ int bus_iommu_probe(const struct bus_type *bus)
|
||||
mutex_unlock(&group->mutex);
|
||||
return ret;
|
||||
}
|
||||
for_each_group_device(group, gdev)
|
||||
iommu_setup_dma_ops(gdev->dev);
|
||||
mutex_unlock(&group->mutex);
|
||||
|
||||
/*
|
||||
@ -3066,18 +3069,9 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
|
||||
/*
|
||||
* Release the mutex here because ops->probe_finalize() call-back of
|
||||
* some vendor IOMMU drivers calls arm_iommu_attach_device() which
|
||||
* in-turn might call back into IOMMU core code, where it tries to take
|
||||
* group->mutex, resulting in a deadlock.
|
||||
*/
|
||||
mutex_unlock(&group->mutex);
|
||||
|
||||
/* Make sure dma_ops is appropriatley set */
|
||||
for_each_group_device(group, gdev)
|
||||
iommu_group_do_probe_finalize(gdev->dev);
|
||||
return count;
|
||||
iommu_setup_dma_ops(gdev->dev);
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&group->mutex);
|
||||
@ -3317,27 +3311,39 @@ EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
|
||||
static int __iommu_set_group_pasid(struct iommu_domain *domain,
|
||||
struct iommu_group *group, ioasid_t pasid)
|
||||
{
|
||||
struct group_device *device;
|
||||
int ret = 0;
|
||||
struct group_device *device, *last_gdev;
|
||||
int ret;
|
||||
|
||||
for_each_group_device(group, device) {
|
||||
ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
|
||||
if (ret)
|
||||
break;
|
||||
goto err_revert;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_revert:
|
||||
last_gdev = device;
|
||||
for_each_group_device(group, device) {
|
||||
const struct iommu_ops *ops = dev_iommu_ops(device->dev);
|
||||
|
||||
if (device == last_gdev)
|
||||
break;
|
||||
ops->remove_dev_pasid(device->dev, pasid, domain);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __iommu_remove_group_pasid(struct iommu_group *group,
|
||||
ioasid_t pasid)
|
||||
ioasid_t pasid,
|
||||
struct iommu_domain *domain)
|
||||
{
|
||||
struct group_device *device;
|
||||
const struct iommu_ops *ops;
|
||||
|
||||
for_each_group_device(group, device) {
|
||||
ops = dev_iommu_ops(device->dev);
|
||||
ops->remove_dev_pasid(device->dev, pasid);
|
||||
ops->remove_dev_pasid(device->dev, pasid, domain);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3383,10 +3389,8 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
|
||||
}
|
||||
|
||||
ret = __iommu_set_group_pasid(domain, group, pasid);
|
||||
if (ret) {
|
||||
__iommu_remove_group_pasid(group, pasid);
|
||||
if (ret)
|
||||
xa_erase(&group->pasid_array, pasid);
|
||||
}
|
||||
out_unlock:
|
||||
mutex_unlock(&group->mutex);
|
||||
return ret;
|
||||
@ -3409,7 +3413,7 @@ void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
|
||||
struct iommu_group *group = dev->iommu_group;
|
||||
|
||||
mutex_lock(&group->mutex);
|
||||
__iommu_remove_group_pasid(group, pasid);
|
||||
__iommu_remove_group_pasid(group, pasid, domain);
|
||||
WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
|
||||
mutex_unlock(&group->mutex);
|
||||
}
|
||||
|
@ -154,7 +154,10 @@ int __init irq_remap_enable_fault_handling(void)
|
||||
if (!remap_ops->enable_faulting)
|
||||
return -ENODEV;
|
||||
|
||||
return remap_ops->enable_faulting();
|
||||
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dmar:enable_fault_handling",
|
||||
remap_ops->enable_faulting, NULL);
|
||||
|
||||
return remap_ops->enable_faulting(smp_processor_id());
|
||||
}
|
||||
|
||||
void panic_if_irq_remap(const char *msg)
|
||||
|
@ -41,7 +41,7 @@ struct irq_remap_ops {
|
||||
int (*reenable)(int);
|
||||
|
||||
/* Enable fault handling */
|
||||
int (*enable_faulting)(void);
|
||||
int (*enable_faulting)(unsigned int);
|
||||
};
|
||||
|
||||
extern struct irq_remap_ops intel_irq_remap_ops;
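The enable_faulting() callback changes from taking void to taking an unsigned int because it is now also registered as a CPU hotplug startup handler (see the cpuhp_setup_state() call in the hunk above), and cpuhp startup/teardown callbacks receive the CPU number. A minimal sketch of that registration shape, with an illustrative handler name:

	static int dmar_fault_startup(unsigned int cpu)
	{
		/* per-CPU fault-reporting setup would go here */
		return 0;
	}

	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dmar:enable_fault_handling",
			  dmar_fault_startup, NULL);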
|
||||
|
@ -26,6 +26,8 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include "iommu-pages.h"
|
||||
|
||||
/** MMU register offsets */
|
||||
#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */
|
||||
#define RK_MMU_STATUS 0x04
|
||||
@ -727,14 +729,14 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
|
||||
if (rk_dte_is_pt_valid(dte))
|
||||
goto done;
|
||||
|
||||
page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
|
||||
page_table = iommu_alloc_page(GFP_ATOMIC | rk_ops->gfp_flags);
|
||||
if (!page_table)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
pt_dma = dma_map_single(dma_dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(dma_dev, pt_dma)) {
|
||||
dev_err(dma_dev, "DMA mapping error while allocating page table\n");
|
||||
free_page((unsigned long)page_table);
|
||||
iommu_free_page(page_table);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@ -1061,7 +1063,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
|
||||
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
|
||||
* Allocate one 4 KiB page for each table.
|
||||
*/
|
||||
rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
|
||||
rk_domain->dt = iommu_alloc_page(GFP_KERNEL | rk_ops->gfp_flags);
|
||||
if (!rk_domain->dt)
|
||||
goto err_free_domain;
|
||||
|
||||
@ -1083,7 +1085,7 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
|
||||
return &rk_domain->domain;
|
||||
|
||||
err_free_dt:
|
||||
free_page((unsigned long)rk_domain->dt);
|
||||
iommu_free_page(rk_domain->dt);
|
||||
err_free_domain:
|
||||
kfree(rk_domain);
|
||||
|
||||
@ -1104,13 +1106,13 @@ static void rk_iommu_domain_free(struct iommu_domain *domain)
|
||||
u32 *page_table = phys_to_virt(pt_phys);
|
||||
dma_unmap_single(dma_dev, pt_phys,
|
||||
SPAGE_SIZE, DMA_TO_DEVICE);
|
||||
free_page((unsigned long)page_table);
|
||||
iommu_free_page(page_table);
|
||||
}
|
||||
}
|
||||
|
||||
dma_unmap_single(dma_dev, rk_domain->dt_dma,
|
||||
SPAGE_SIZE, DMA_TO_DEVICE);
|
||||
free_page((unsigned long)rk_domain->dt);
|
||||
iommu_free_page(rk_domain->dt);
|
||||
|
||||
kfree(rk_domain);
|
||||
}
|
||||
|
@ -695,11 +695,6 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
|
||||
return size;
|
||||
}
|
||||
|
||||
static void s390_iommu_probe_finalize(struct device *dev)
|
||||
{
|
||||
iommu_setup_dma_ops(dev, 0, U64_MAX);
|
||||
}
|
||||
|
||||
struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
|
||||
{
|
||||
if (!zdev || !zdev->s390_domain)
|
||||
@ -785,7 +780,6 @@ static const struct iommu_ops s390_iommu_ops = {
|
||||
.capable = s390_iommu_capable,
|
||||
.domain_alloc_paging = s390_domain_alloc_paging,
|
||||
.probe_device = s390_iommu_probe_device,
|
||||
.probe_finalize = s390_iommu_probe_finalize,
|
||||
.release_device = s390_iommu_release_device,
|
||||
.device_group = generic_device_group,
|
||||
.pgsize_bitmap = SZ_4K,
|
||||
|
@ -26,6 +26,8 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include "iommu-pages.h"
|
||||
|
||||
#define IOMMU_RESET_REG 0x010
|
||||
#define IOMMU_RESET_RELEASE_ALL 0xffffffff
|
||||
#define IOMMU_ENABLE_REG 0x020
|
||||
@ -679,8 +681,7 @@ sun50i_iommu_domain_alloc_paging(struct device *dev)
|
||||
if (!sun50i_domain)
|
||||
return NULL;
|
||||
|
||||
sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
|
||||
get_order(DT_SIZE));
|
||||
sun50i_domain->dt = iommu_alloc_pages(GFP_KERNEL, get_order(DT_SIZE));
|
||||
if (!sun50i_domain->dt)
|
||||
goto err_free_domain;
|
||||
|
||||
@ -702,7 +703,7 @@ static void sun50i_iommu_domain_free(struct iommu_domain *domain)
|
||||
{
|
||||
struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
|
||||
|
||||
free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
|
||||
iommu_free_pages(sun50i_domain->dt, get_order(DT_SIZE));
|
||||
sun50i_domain->dt = NULL;
|
||||
|
||||
kfree(sun50i_domain);
|
||||
|
@ -19,6 +19,8 @@
|
||||
#include <soc/tegra/ahb.h>
|
||||
#include <soc/tegra/mc.h>
|
||||
|
||||
#include "iommu-pages.h"
|
||||
|
||||
struct tegra_smmu_group {
|
||||
struct list_head list;
|
||||
struct tegra_smmu *smmu;
|
||||
@ -282,7 +284,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
|
||||
|
||||
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
|
||||
|
||||
as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
|
||||
as->pd = __iommu_alloc_pages(GFP_KERNEL | __GFP_DMA, 0);
|
||||
if (!as->pd) {
|
||||
kfree(as);
|
||||
return NULL;
|
||||
@ -290,7 +292,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
|
||||
|
||||
as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
|
||||
if (!as->count) {
|
||||
__free_page(as->pd);
|
||||
__iommu_free_pages(as->pd, 0);
|
||||
kfree(as);
|
||||
return NULL;
|
||||
}
|
||||
@ -298,7 +300,7 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
|
||||
as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
|
||||
if (!as->pts) {
|
||||
kfree(as->count);
|
||||
__free_page(as->pd);
|
||||
__iommu_free_pages(as->pd, 0);
|
||||
kfree(as);
|
||||
return NULL;
|
||||
}
|
||||
@ -599,14 +601,14 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
|
||||
dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(smmu->dev, dma)) {
|
||||
__free_page(page);
|
||||
__iommu_free_pages(page, 0);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!smmu_dma_addr_valid(smmu, dma)) {
|
||||
dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
|
||||
DMA_TO_DEVICE);
|
||||
__free_page(page);
|
||||
__iommu_free_pages(page, 0);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -649,7 +651,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
|
||||
tegra_smmu_set_pde(as, iova, 0);
|
||||
|
||||
dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
|
||||
__free_page(page);
|
||||
__iommu_free_pages(page, 0);
|
||||
as->pts[pde] = NULL;
|
||||
}
|
||||
}
|
||||
@@ -688,7 +690,7 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
 	if (gfpflags_allow_blocking(gfp))
 		spin_unlock_irqrestore(&as->lock, *flags);
 
-	page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);
+	page = __iommu_alloc_pages(gfp | __GFP_DMA, 0);
 
 	if (gfpflags_allow_blocking(gfp))
 		spin_lock_irqsave(&as->lock, *flags);
@@ -700,7 +702,7 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
 	 */
 	if (as->pts[pde]) {
 		if (page)
-			__free_page(page);
+			__iommu_free_pages(page, 0);
 
 		page = as->pts[pde];
 	}
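The tegra-smmu hunks use the struct page flavour of the same helpers, because the page is subsequently handed to dma_map_page(). A condensed sketch of that allocate-and-map error path, loosely mirroring as_get_pte() above; my_pt_alloc(), my_smmu_dev and MY_PT_SIZE are invented.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

#include "iommu-pages.h"

#define MY_PT_SIZE	SZ_4K	/* invented page-table size */

/* Allocate one DMA-able page-table page and map it for the device. */
static struct page *my_pt_alloc(struct device *my_smmu_dev, dma_addr_t *dma)
{
        struct page *page = __iommu_alloc_pages(GFP_KERNEL | __GFP_DMA, 0);

        if (!page)
                return NULL;

        *dma = dma_map_page(my_smmu_dev, page, 0, MY_PT_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(my_smmu_dev, *dma)) {
                /* __iommu_free_pages() is the replacement for __free_page() */
                __iommu_free_pages(page, 0);
                return NULL;
        }

        return page;
}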
@@ -1025,15 +1025,6 @@ static struct iommu_device *viommu_probe_device(struct device *dev)
 	return ERR_PTR(ret);
 }
 
-static void viommu_probe_finalize(struct device *dev)
-{
-#ifndef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-	/* First clear the DMA ops in case we're switching from a DMA domain */
-	set_dma_ops(dev, NULL);
-	iommu_setup_dma_ops(dev, 0, U64_MAX);
-#endif
-}
-
 static void viommu_release_device(struct device *dev)
 {
 	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
@@ -1073,7 +1064,6 @@ static struct iommu_ops viommu_ops = {
 	.capable = viommu_capable,
 	.domain_alloc = viommu_domain_alloc,
 	.probe_device = viommu_probe_device,
-	.probe_finalize = viommu_probe_finalize,
 	.release_device = viommu_release_device,
 	.device_group = viommu_device_group,
 	.get_resv_regions = viommu_get_resv_regions,
@@ -95,8 +95,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 {
 	const struct bus_dma_region *map = NULL;
 	struct device_node *bus_np;
-	u64 dma_start = 0;
-	u64 mask, end, size = 0;
+	u64 mask, end = 0;
 	bool coherent;
 	int iommu_ret;
 	int ret;
@@ -117,34 +116,8 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 		if (!force_dma)
 			return ret == -ENODEV ? 0 : ret;
 	} else {
-		const struct bus_dma_region *r = map;
-		u64 dma_end = 0;
-
-		/* Determine the overall bounds of all DMA regions */
-		for (dma_start = ~0; r->size; r++) {
-			/* Take lower and upper limits */
-			if (r->dma_start < dma_start)
-				dma_start = r->dma_start;
-			if (r->dma_start + r->size > dma_end)
-				dma_end = r->dma_start + r->size;
-		}
-		size = dma_end - dma_start;
-
-		/*
-		 * Add a work around to treat the size as mask + 1 in case
-		 * it is defined in DT as a mask.
-		 */
-		if (size & 1) {
-			dev_warn(dev, "Invalid size 0x%llx for dma-range(s)\n",
-				 size);
-			size = size + 1;
-		}
-
-		if (!size) {
-			dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
-			kfree(map);
-			return -EINVAL;
-		}
+		end = dma_range_map_max(map);
 	}
 
 	/*
@@ -158,16 +131,15 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 		dev->dma_mask = &dev->coherent_dma_mask;
 	}
 
-	if (!size && dev->coherent_dma_mask)
-		size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
-	else if (!size)
-		size = 1ULL << 32;
+	if (!end && dev->coherent_dma_mask)
+		end = dev->coherent_dma_mask;
+	else if (!end)
+		end = (1ULL << 32) - 1;
 
 	/*
 	 * Limit coherent and dma mask based on size and default mask
 	 * set by the driver.
 	 */
-	end = dma_start + size - 1;
 	mask = DMA_BIT_MASK(ilog2(end) + 1);
 	dev->coherent_dma_mask &= mask;
 	*dev->dma_mask &= mask;
@@ -201,7 +173,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 	} else
 		dev_dbg(dev, "device is behind an iommu\n");
 
-	arch_setup_dma_ops(dev, dma_start, size, coherent);
+	arch_setup_dma_ops(dev, coherent);
 
 	if (iommu_ret)
 		of_dma_set_restricted_buffer(dev, np);
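Taken together, the of_dma_configure_id() hunks replace the (dma_start, size) bookkeeping with a single inclusive limit: end now comes from dma_range_map_max() (the helper is added further down in this diff) or falls back to the coherent mask, and arch_setup_dma_ops() no longer takes a range at all. The retained clamp still derives the DMA masks from that limit; a small worked example with an invented 31-bit limit:

#include <linux/dma-mapping.h>
#include <linux/log2.h>

/*
 * Invented example: the highest reachable DMA address is 0x7fffffff.
 * ilog2(end) == 30, so the derived mask is DMA_BIT_MASK(31) == 0x7fffffff,
 * matching the clamp kept by the hunk above.
 */
static u64 example_mask_from_limit(void)
{
        u64 end = 0x7fffffffULL;

        return DMA_BIT_MASK(ilog2(end) + 1);
}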
@@ -39,7 +39,7 @@ void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode,
 void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode,
 		       struct list_head *head);
 /* IOMMU interface */
-int iort_dma_get_ranges(struct device *dev, u64 *size);
+int iort_dma_get_ranges(struct device *dev, u64 *limit);
 int iort_iommu_configure_id(struct device *dev, const u32 *id_in);
 void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head);
 phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
@@ -55,7 +55,7 @@ void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *hea
 static inline
 void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { }
 /* IOMMU interface */
-static inline int iort_dma_get_ranges(struct device *dev, u64 *size)
+static inline int iort_dma_get_ranges(struct device *dev, u64 *limit)
 { return -ENODEV; }
 static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in)
 { return -ENODEV; }
@@ -54,6 +54,24 @@ static inline phys_addr_t translate_dma_to_phys(struct device *dev,
 	return (phys_addr_t)-1;
 }
 
+static inline dma_addr_t dma_range_map_min(const struct bus_dma_region *map)
+{
+	dma_addr_t ret = (dma_addr_t)U64_MAX;
+
+	for (; map->size; map++)
+		ret = min(ret, map->dma_start);
+	return ret;
+}
+
+static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map)
+{
+	dma_addr_t ret = 0;
+
+	for (; map->size; map++)
+		ret = max(ret, map->dma_start + map->size - 1);
+	return ret;
+}
+
 #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
 #include <asm/dma-direct.h>
 #ifndef phys_to_dma_unencrypted
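The helpers added above walk a bus_dma_region array terminated by a zero-size entry and return the lowest start and highest inclusive end; dma_range_map_max() is what of_dma_configure_id() now calls instead of its removed open-coded loop. A usage sketch with an invented two-window map:

#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/sizes.h>

/* Invented two-window map; a zero .size entry terminates the walk. */
static const struct bus_dma_region example_map[] = {
        { .dma_start = 0x00000000, .size = SZ_1G },
        { .dma_start = 0x80000000, .size = SZ_2G },
        { /* sentinel */ }
};

static void example_report_bounds(struct device *dev)
{
        dma_addr_t lo = dma_range_map_min(example_map); /* 0x00000000 */
        dma_addr_t hi = dma_range_map_max(example_map); /* 0xffffffff */

        dev_info(dev, "usable DMA window: %pad..%pad\n", &lo, &hi);
}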
@@ -426,11 +426,9 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
 #endif
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			bool coherent);
+void arch_setup_dma_ops(struct device *dev, bool coherent);
 #else
-static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
-				      u64 size, bool coherent)
+static inline void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
 }
 #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
@@ -117,7 +117,7 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
 				 int count);
 /* Intel IOMMU detection */
 void detect_intel_iommu(void);
-extern int enable_drhd_fault_handling(void);
+extern int enable_drhd_fault_handling(unsigned int cpu);
 extern int dmar_device_add(acpi_handle handle);
 extern int dmar_device_remove(acpi_handle handle);
 
@@ -69,8 +69,7 @@ enum iommu_fault_type {
 struct iommu_fault_page_request {
 #define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
 #define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
-#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA	(1 << 2)
-#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 3)
+#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 2)
 	u32 flags;
 	u32 pasid;
 	u32 grpid;
@@ -518,6 +517,7 @@ static inline int __iommu_copy_struct_from_user_array(
  *                     Upon failure, ERR_PTR must be returned.
  * @domain_alloc_paging: Allocate an iommu_domain that can be used for
  *                       UNMANAGED, DMA, and DMA_FQ domain types.
+ * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
  * @probe_device: Add device to iommu driver handling
  * @release_device: Remove device from iommu driver handling
  * @probe_finalize: Do final setup work after the device is added to an IOMMU
@@ -558,6 +558,8 @@ struct iommu_ops {
 			struct device *dev, u32 flags, struct iommu_domain *parent,
 			const struct iommu_user_data *user_data);
 	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
+	struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
+						 struct mm_struct *mm);
 
 	struct iommu_device *(*probe_device)(struct device *dev);
 	void (*release_device)(struct device *dev);
@@ -578,7 +580,8 @@ struct iommu_ops {
 				  struct iommu_page_response *msg);
 
 	int (*def_domain_type)(struct device *dev);
-	void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);
+	void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid,
+				 struct iommu_domain *domain);
 
 	const struct iommu_domain_ops *default_domain_ops;
 	unsigned long pgsize_bitmap;
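The new domain_alloc_sva() callback and the extra iommu_domain argument to remove_dev_pasid() come from the SVA work in this pull. A hedged sketch of how a driver could populate the new members; every mydrv_* identifier is invented and the bodies are placeholders, not the real AMD implementation.

#include <linux/err.h>
#include <linux/iommu.h>

/* Invented stand-ins for a real driver's SVA hooks. */
static struct iommu_domain *mydrv_domain_alloc_sva(struct device *dev,
                                                   struct mm_struct *mm)
{
        /* A real driver would allocate a domain bound to @mm here. */
        return ERR_PTR(-EOPNOTSUPP);
}

static void mydrv_remove_dev_pasid(struct device *dev, ioasid_t pasid,
                                   struct iommu_domain *domain)
{
        /* A real driver would tear down the PASID attachment here. */
}

static const struct iommu_ops mydrv_iommu_ops = {
        /* ... the usual callbacks elided ... */
        .domain_alloc_sva = mydrv_domain_alloc_sva,
        .remove_dev_pasid = mydrv_remove_dev_pasid,
};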
@@ -1445,9 +1448,6 @@ static inline void iommu_debugfs_setup(void) {}
 #ifdef CONFIG_IOMMU_DMA
 #include <linux/msi.h>
 
-/* Setup call for arch DMA mapping code */
-void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);
-
 int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
 
 int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
@@ -1458,10 +1458,6 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);
 struct msi_desc;
 struct msi_msg;
 
-static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
-{
-}
-
 static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
 {
 	return -ENODEV;
@@ -205,7 +205,10 @@ enum node_stat_item {
 	NR_KERNEL_SCS_KB,	/* measured in KiB */
 #endif
 	NR_PAGETABLE,		/* used for pagetables */
-	NR_SECONDARY_PAGETABLE, /* secondary pagetables, e.g. KVM pagetables */
+	NR_SECONDARY_PAGETABLE, /* secondary pagetables, KVM & IOMMU */
+#ifdef CONFIG_IOMMU_SUPPORT
+	NR_IOMMU_PAGES,		/* # of pages allocated by IOMMU */
+#endif
 #ifdef CONFIG_SWAP
 	NR_SWAPCACHE,
 #endif
@@ -1242,6 +1242,9 @@ const char * const vmstat_text[] = {
 #endif
 	"nr_page_table_pages",
 	"nr_sec_page_table_pages",
+#ifdef CONFIG_IOMMU_SUPPORT
+	"nr_iommu_pages",
+#endif
 #ifdef CONFIG_SWAP
 	"nr_swapcached",
 #endif
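With these last two hunks the new NR_IOMMU_PAGES node counter is exported as "nr_iommu_pages", so the memory the IOMMU layer allocates for its page tables becomes visible alongside the existing page-table counters, e.g. in /proc/vmstat and the per-node vmstat files.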