Merge tag 'iommu-updates-v6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel:
 "Core changes:
   - Fix race conditions in device probe path
   - Retire IOMMU bus_ops
   - Support for passing custom allocators to page table drivers
   - Clean up Kconfig around IOMMU_SVA
   - Support for sharing SVA domains with all devices bound to a mm
   - Firmware data parsing cleanup
   - Tracing improvements for iommu-dma code
   - Some smaller fixes and cleanups

  ARM-SMMU drivers:
   - Device-tree binding updates:
      - Add additional compatible strings for Qualcomm SoCs
      - Document Adreno clocks for Qualcomm's SM8350 SoC
   - SMMUv2:
      - Implement support for the ->domain_alloc_paging() callback
      - Ensure Secure context is restored following suspend of Qualcomm
        SMMU implementation
   - SMMUv3:
      - Disable stalling mode for the "quiet" context descriptor
      - Minor refactoring and driver cleanups

  Intel VT-d driver:
   - Cleanup and refactoring

  AMD IOMMU driver:
   - Improve IO TLB invalidation logic
   - Small cleanups and improvements

  Rockchip IOMMU driver:
   - DT binding update to add Rockchip RK3588

  Apple DART driver:
   - Apple M1 USB4/Thunderbolt DART support
   - Cleanups

  Virtio IOMMU driver:
   - Add support for iotlb_sync_map
   - Enable deferred IO TLB flushes"
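
A minimal kernel-context sketch of the SVA item above ("sharing SVA domains
with all devices bound to a mm"), as seen from a driver: the wrapper function
is illustrative only, while iommu_sva_bind_device() and
iommu_sva_unbind_device() are the existing core helpers. With this series,
every device bound to the same mm ends up sharing one SVA domain.

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

static int example_bind_sva(struct device *dev)
{
        struct iommu_sva *handle;

        /* Bind the device to the current process's address space. */
        handle = iommu_sva_bind_device(dev, current->mm);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        /* ... device may now DMA using CPU virtual addresses ... */

        iommu_sva_unbind_device(handle);
        return 0;
}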

* tag 'iommu-updates-v6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (66 commits)
  iommu: Don't reserve 0-length IOVA region
  iommu/vt-d: Move inline helpers to header files
  iommu/vt-d: Remove unused vcmd interfaces
  iommu/vt-d: Remove unused parameter of intel_pasid_setup_pass_through()
  iommu/vt-d: Refactor device_to_iommu() to retrieve iommu directly
  iommu/sva: Fix memory leak in iommu_sva_bind_device()
  dt-bindings: iommu: rockchip: Add Rockchip RK3588
  iommu/dma: Trace bounce buffer usage when mapping buffers
  iommu/arm-smmu: Convert to domain_alloc_paging()
  iommu/arm-smmu: Pass arm_smmu_domain to internal functions
  iommu/arm-smmu: Implement IOMMU_DOMAIN_BLOCKED
  iommu/arm-smmu: Convert to a global static identity domain
  iommu/arm-smmu: Reorganize arm_smmu_domain_add_master()
  iommu/arm-smmu-v3: Remove ARM_SMMU_DOMAIN_NESTED
  iommu/arm-smmu-v3: Master cannot be NULL in arm_smmu_write_strtab_ent()
  iommu/arm-smmu-v3: Add a type for the STE
  iommu/arm-smmu-v3: disable stall for quiet_cd
  iommu/qcom: restore IOMMU state if needed
  iommu/arm-smmu-qcom: Add QCM2290 MDSS compatible
  iommu/arm-smmu-qcom: Add missing GMU entry to match table
  ...
commit 0dde2bf67b
Linus Torvalds, 2024-01-18 15:16:57 -08:00
63 changed files with 1279 additions and 1043 deletions


@ -24,6 +24,7 @@ properties:
compatible: compatible:
enum: enum:
- apple,t8103-dart - apple,t8103-dart
- apple,t8103-usb4-dart
- apple,t8110-dart - apple,t8110-dart
- apple,t6000-dart - apple,t6000-dart


@ -56,6 +56,8 @@ properties:
- qcom,sm8350-smmu-500 - qcom,sm8350-smmu-500
- qcom,sm8450-smmu-500 - qcom,sm8450-smmu-500
- qcom,sm8550-smmu-500 - qcom,sm8550-smmu-500
- qcom,sm8650-smmu-500
- qcom,x1e80100-smmu-500
- const: qcom,smmu-500 - const: qcom,smmu-500
- const: arm,mmu-500 - const: arm,mmu-500
@ -89,6 +91,8 @@ properties:
- qcom,sm8150-smmu-500 - qcom,sm8150-smmu-500
- qcom,sm8250-smmu-500 - qcom,sm8250-smmu-500
- qcom,sm8350-smmu-500 - qcom,sm8350-smmu-500
- qcom,sm8450-smmu-500
- qcom,sm8550-smmu-500
- const: qcom,adreno-smmu - const: qcom,adreno-smmu
- const: qcom,smmu-500 - const: qcom,smmu-500
- const: arm,mmu-500 - const: arm,mmu-500
@ -429,6 +433,30 @@ allOf:
- description: interface clock required to access smmu's registers - description: interface clock required to access smmu's registers
through the TCU's programming interface. through the TCU's programming interface.
- if:
properties:
compatible:
items:
- enum:
- qcom,sm8350-smmu-500
- const: qcom,adreno-smmu
- const: qcom,smmu-500
- const: arm,mmu-500
then:
properties:
clock-names:
items:
- const: bus
- const: iface
- const: ahb
- const: hlos1_vote_gpu_smmu
- const: cx_gmu
- const: hub_cx_int
- const: hub_aon
clocks:
minItems: 7
maxItems: 7
- if: - if:
properties: properties:
compatible: compatible:
@ -453,6 +481,50 @@ allOf:
- description: Voter clock required for HLOS SMMU access - description: Voter clock required for HLOS SMMU access
- description: Interface clock required for register access - description: Interface clock required for register access
- if:
properties:
compatible:
const: qcom,sm8450-smmu-500
then:
properties:
clock-names:
items:
- const: gmu
- const: hub
- const: hlos
- const: bus
- const: iface
- const: ahb
clocks:
items:
- description: GMU clock
- description: GPU HUB clock
- description: HLOS vote clock
- description: GPU memory bus clock
- description: GPU SNoC bus clock
- description: GPU AHB clock
- if:
properties:
compatible:
const: qcom,sm8550-smmu-500
then:
properties:
clock-names:
items:
- const: hlos
- const: bus
- const: iface
- const: ahb
clocks:
items:
- description: HLOS vote clock
- description: GPU memory bus clock
- description: GPU SNoC bus clock
- description: GPU AHB clock
# Disallow clocks for all other platforms with specific compatibles # Disallow clocks for all other platforms with specific compatibles
- if: - if:
properties: properties:
@ -472,9 +544,8 @@ allOf:
- qcom,sdx65-smmu-500 - qcom,sdx65-smmu-500
- qcom,sm6350-smmu-500 - qcom,sm6350-smmu-500
- qcom,sm6375-smmu-500 - qcom,sm6375-smmu-500
- qcom,sm8350-smmu-500 - qcom,sm8650-smmu-500
- qcom,sm8450-smmu-500 - qcom,x1e80100-smmu-500
- qcom,sm8550-smmu-500
then: then:
properties: properties:
clock-names: false clock-names: false


@ -19,9 +19,14 @@ description: |+
properties: properties:
compatible: compatible:
enum: oneOf:
- rockchip,iommu - enum:
- rockchip,rk3568-iommu - rockchip,iommu
- rockchip,rk3568-iommu
- items:
- enum:
- rockchip,rk3588-iommu
- const: rockchip,rk3568-iommu
reg: reg:
items: items:


@ -301,6 +301,11 @@ config ARCH_HAS_DMA_CLEAR_UNCACHED
config ARCH_HAS_CPU_FINALIZE_INIT config ARCH_HAS_CPU_FINALIZE_INIT
bool bool
# The architecture has a per-task state that includes the mm's PASID
config ARCH_HAS_CPU_PASID
bool
select IOMMU_MM_DATA
config HAVE_ARCH_THREAD_STRUCT_WHITELIST config HAVE_ARCH_THREAD_STRUCT_WHITELIST
bool bool
help help


@ -91,7 +91,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
* Plug in direct dma map ops. * Plug in direct dma map ops.
*/ */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
/* /*
* IOC hardware snoops all DMA traffic keeping the caches consistent * IOC hardware snoops all DMA traffic keeping the caches consistent


@ -34,7 +34,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
} }
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
if (IS_ENABLED(CONFIG_CPU_V7M)) { if (IS_ENABLED(CONFIG_CPU_V7M)) {
/* /*


@ -1710,7 +1710,7 @@ void arm_iommu_detach_device(struct device *dev)
EXPORT_SYMBOL_GPL(arm_iommu_detach_device); EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
struct dma_iommu_mapping *mapping; struct dma_iommu_mapping *mapping;
@ -1745,7 +1745,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
#else #else
static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
} }
@ -1754,7 +1754,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
#endif /* CONFIG_ARM_DMA_USE_IOMMU */ #endif /* CONFIG_ARM_DMA_USE_IOMMU */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
/* /*
* Due to legacy code that sets the ->dma_coherent flag from a bus * Due to legacy code that sets the ->dma_coherent flag from a bus
@ -1773,8 +1773,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
if (dev->dma_ops) if (dev->dma_ops)
return; return;
if (iommu) if (device_iommu_mapped(dev))
arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent); arm_setup_iommu_dma_ops(dev, dma_base, size, coherent);
xen_setup_dma_ops(dev); xen_setup_dma_ops(dev);
dev->archdata.dma_ops_setup = true; dev->archdata.dma_ops_setup = true;


@ -47,7 +47,7 @@ void arch_teardown_dma_ops(struct device *dev)
#endif #endif
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
int cls = cache_line_size_of_cpu(); int cls = cache_line_size_of_cpu();
@ -58,7 +58,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
ARCH_DMA_MINALIGN, cls); ARCH_DMA_MINALIGN, cls);
dev->dma_coherent = coherent; dev->dma_coherent = coherent;
if (iommu) if (device_iommu_mapped(dev))
iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1); iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
xen_setup_dma_ops(dev); xen_setup_dma_ops(dev);
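
The arch hunks above all drop the iommu_ops argument from
arch_setup_dma_ops() and key the IOMMU DMA-ops setup off
device_iommu_mapped() instead. A sketch of the resulting shape, modelled
directly on the arm64 variant shown above (the Xen hook is omitted here;
header choices follow the existing arm64 file):

#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu.h>

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        bool coherent)
{
        dev->dma_coherent = coherent;

        /* The ops pointer is gone; ask the IOMMU core instead. */
        if (device_iommu_mapped(dev))
                iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
}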


@ -138,7 +138,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
dev->dma_coherent = coherent; dev->dma_coherent = coherent;
} }


@ -129,7 +129,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
} }
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) bool coherent)
{ {
WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN, WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
TAINT_CPU_OUT_OF_SPEC, TAINT_CPU_OUT_OF_SPEC,


@ -72,6 +72,7 @@ config X86
select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
select ARCH_HAS_CPU_FINALIZE_INIT select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_CPU_PASID if IOMMU_SVA
select ARCH_HAS_CURRENT_STACK_POINTER select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE


@ -566,7 +566,7 @@ static bool fixup_iopl_exception(struct pt_regs *regs)
*/ */
static bool try_fixup_enqcmd_gp(void) static bool try_fixup_enqcmd_gp(void)
{ {
#ifdef CONFIG_IOMMU_SVA #ifdef CONFIG_ARCH_HAS_CPU_PASID
u32 pasid; u32 pasid;
/* /*
@ -592,7 +592,7 @@ static bool try_fixup_enqcmd_gp(void)
if (!mm_valid_pasid(current->mm)) if (!mm_valid_pasid(current->mm))
return false; return false;
pasid = current->mm->pasid; pasid = mm_get_enqcmd_pasid(current->mm);
/* /*
* Did this thread already have its PASID activated? * Did this thread already have its PASID activated?
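
The traps.c hunk above is the user-visible side of the IOMMU_SVA Kconfig
cleanup: the ENQCMD #GP fixup is now gated on CONFIG_ARCH_HAS_CPU_PASID and
reads the PASID through mm_get_enqcmd_pasid() rather than touching
mm->pasid directly. A kernel-context sketch of that accessor pattern (the
wrapper function is hypothetical; the helpers are the ones used in the
hunk):

#include <linux/iommu.h>
#include <linux/sched.h>

#ifdef CONFIG_ARCH_HAS_CPU_PASID
static u32 example_current_enqcmd_pasid(void)
{
        struct mm_struct *mm = current->mm;

        /* No PASID allocated for this mm, nothing to report. */
        if (!mm || !mm_valid_pasid(mm))
                return IOMMU_PASID_INVALID;

        return mm_get_enqcmd_pasid(mm);
}
#endif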


@ -1561,8 +1561,7 @@ static inline const struct iommu_ops *acpi_iommu_fwspec_ops(struct device *dev)
return fwspec ? fwspec->ops : NULL; return fwspec ? fwspec->ops : NULL;
} }
static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev, static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
const u32 *id_in)
{ {
int err; int err;
const struct iommu_ops *ops; const struct iommu_ops *ops;
@ -1576,7 +1575,7 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
ops = acpi_iommu_fwspec_ops(dev); ops = acpi_iommu_fwspec_ops(dev);
if (ops) { if (ops) {
mutex_unlock(&iommu_probe_device_lock); mutex_unlock(&iommu_probe_device_lock);
return ops; return 0;
} }
err = iort_iommu_configure_id(dev, id_in); err = iort_iommu_configure_id(dev, id_in);
@ -1593,12 +1592,14 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
/* Ignore all other errors apart from EPROBE_DEFER */ /* Ignore all other errors apart from EPROBE_DEFER */
if (err == -EPROBE_DEFER) { if (err == -EPROBE_DEFER) {
return ERR_PTR(err); return err;
} else if (err) { } else if (err) {
dev_dbg(dev, "Adding to IOMMU failed: %d\n", err); dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
return NULL; return -ENODEV;
} }
return acpi_iommu_fwspec_ops(dev); if (!acpi_iommu_fwspec_ops(dev))
return -ENODEV;
return 0;
} }
#else /* !CONFIG_IOMMU_API */ #else /* !CONFIG_IOMMU_API */
@ -1610,10 +1611,9 @@ int acpi_iommu_fwspec_init(struct device *dev, u32 id,
return -ENODEV; return -ENODEV;
} }
static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev, static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
const u32 *id_in)
{ {
return NULL; return -ENODEV;
} }
#endif /* !CONFIG_IOMMU_API */ #endif /* !CONFIG_IOMMU_API */
@ -1627,7 +1627,7 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
const u32 *input_id) const u32 *input_id)
{ {
const struct iommu_ops *iommu; int ret;
if (attr == DEV_DMA_NOT_SUPPORTED) { if (attr == DEV_DMA_NOT_SUPPORTED) {
set_dma_ops(dev, &dma_dummy_ops); set_dma_ops(dev, &dma_dummy_ops);
@ -1636,12 +1636,16 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
acpi_arch_dma_setup(dev); acpi_arch_dma_setup(dev);
iommu = acpi_iommu_configure_id(dev, input_id); ret = acpi_iommu_configure_id(dev, input_id);
if (PTR_ERR(iommu) == -EPROBE_DEFER) if (ret == -EPROBE_DEFER)
return -EPROBE_DEFER; return -EPROBE_DEFER;
arch_setup_dma_ops(dev, 0, U64_MAX, /*
iommu, attr == DEV_DMA_COHERENT); * Historically this routine doesn't fail driver probing due to errors
* in acpi_iommu_configure_id()
*/
arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT);
return 0; return 0;
} }


@ -1348,8 +1348,8 @@ static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id)
static int tegra_dma_probe(struct platform_device *pdev) static int tegra_dma_probe(struct platform_device *pdev)
{ {
const struct tegra_dma_chip_data *cdata = NULL; const struct tegra_dma_chip_data *cdata = NULL;
struct iommu_fwspec *iommu_spec; unsigned int i;
unsigned int stream_id, i; u32 stream_id;
struct tegra_dma *tdma; struct tegra_dma *tdma;
int ret; int ret;
@ -1378,12 +1378,10 @@ static int tegra_dma_probe(struct platform_device *pdev)
tdma->dma_dev.dev = &pdev->dev; tdma->dma_dev.dev = &pdev->dev;
iommu_spec = dev_iommu_fwspec_get(&pdev->dev); if (!tegra_dev_iommu_get_stream_id(&pdev->dev, &stream_id)) {
if (!iommu_spec) {
dev_err(&pdev->dev, "Missing iommu stream-id\n"); dev_err(&pdev->dev, "Missing iommu stream-id\n");
return -EINVAL; return -EINVAL;
} }
stream_id = iommu_spec->ids[0] & 0xffff;
ret = device_property_read_u32(&pdev->dev, "dma-channel-mask", ret = device_property_read_u32(&pdev->dev, "dma-channel-mask",
&tdma->chan_mask); &tdma->chan_mask);
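
This hunk, and the nouveau one after it, replace open-coded peeking at
dev_iommu_fwspec_get(dev)->ids[] with the tegra_dev_iommu_get_stream_id()
helper. A sketch of the call pattern, assuming the helper is reachable via
linux/iommu.h as in the updated drivers (the wrapper function is
illustrative):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>

static int example_read_stream_id(struct device *dev, u32 *sid)
{
        /* Returns false when the device has no usable IOMMU fwspec. */
        if (!tegra_dev_iommu_get_stream_id(dev, sid))
                return -ENODEV;

        return 0;
}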


@ -28,19 +28,14 @@ static void
gp10b_ltc_init(struct nvkm_ltc *ltc) gp10b_ltc_init(struct nvkm_ltc *ltc)
{ {
struct nvkm_device *device = ltc->subdev.device; struct nvkm_device *device = ltc->subdev.device;
struct iommu_fwspec *spec; u32 sid;
nvkm_wr32(device, 0x17e27c, ltc->ltc_nr); nvkm_wr32(device, 0x17e27c, ltc->ltc_nr);
nvkm_wr32(device, 0x17e000, ltc->ltc_nr); nvkm_wr32(device, 0x17e000, ltc->ltc_nr);
nvkm_wr32(device, 0x100800, ltc->ltc_nr); nvkm_wr32(device, 0x100800, ltc->ltc_nr);
spec = dev_iommu_fwspec_get(device->dev); if (tegra_dev_iommu_get_stream_id(device->dev, &sid))
if (spec) {
u32 sid = spec->ids[0] & 0xffff;
/* stream ID */
nvkm_wr32(device, 0x160000, sid << 2); nvkm_wr32(device, 0x160000, sid << 2);
}
} }
static const struct nvkm_ltc_func static const struct nvkm_ltc_func


@ -488,7 +488,7 @@ void hv_setup_dma_ops(struct device *dev, bool coherent)
* Hyper-V does not offer a vIOMMU in the guest * Hyper-V does not offer a vIOMMU in the guest
* VM, so pass 0/NULL for the IOMMU settings * VM, so pass 0/NULL for the IOMMU settings
*/ */
arch_setup_dma_ops(dev, 0, 0, NULL, coherent); arch_setup_dma_ops(dev, 0, 0, coherent);
} }
EXPORT_SYMBOL_GPL(hv_setup_dma_ops); EXPORT_SYMBOL_GPL(hv_setup_dma_ops);


@ -160,6 +160,7 @@ config IOMMU_DMA
# Shared Virtual Addressing # Shared Virtual Addressing
config IOMMU_SVA config IOMMU_SVA
select IOMMU_MM_DATA
bool bool
config FSL_PAMU config FSL_PAMU


@ -53,10 +53,16 @@ int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev); void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address); int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address);
/*
* This function flushes all internal caches of
* the IOMMU used by this driver.
*/
void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
void amd_iommu_update_and_flush_device_table(struct protection_domain *domain); void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
void amd_iommu_domain_update(struct protection_domain *domain); void amd_iommu_domain_update(struct protection_domain *domain);
void amd_iommu_domain_flush_complete(struct protection_domain *domain); void amd_iommu_domain_flush_complete(struct protection_domain *domain);
void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain); void amd_iommu_domain_flush_pages(struct protection_domain *domain,
u64 address, size_t size);
int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid); int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid, int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
unsigned long cr3); unsigned long cr3);


@ -902,12 +902,6 @@ extern int amd_iommu_max_glx_val;
extern u64 amd_iommu_efr; extern u64 amd_iommu_efr;
extern u64 amd_iommu_efr2; extern u64 amd_iommu_efr2;
/*
* This function flushes all internal caches of
* the IOMMU used by this driver.
*/
void iommu_flush_all_caches(struct amd_iommu *iommu);
static inline int get_ioapic_devid(int id) static inline int get_ioapic_devid(int id)
{ {
struct devid_map *entry; struct devid_map *entry;


@ -2223,7 +2223,7 @@ static int __init amd_iommu_init_pci(void)
init_device_table_dma(pci_seg); init_device_table_dma(pci_seg);
for_each_iommu(iommu) for_each_iommu(iommu)
iommu_flush_all_caches(iommu); amd_iommu_flush_all_caches(iommu);
print_iommu_info(); print_iommu_info();
@ -2773,7 +2773,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_xt(iommu); iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu); iommu_enable_irtcachedis(iommu);
iommu_enable(iommu); iommu_enable(iommu);
iommu_flush_all_caches(iommu); amd_iommu_flush_all_caches(iommu);
} }
/* /*
@ -2829,7 +2829,7 @@ static void early_enable_iommus(void)
iommu_enable_xt(iommu); iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu); iommu_enable_irtcachedis(iommu);
iommu_set_device_table(iommu); iommu_set_device_table(iommu);
iommu_flush_all_caches(iommu); amd_iommu_flush_all_caches(iommu);
} }
} }
} }
@ -3293,7 +3293,7 @@ static int __init state_next(void)
uninit_device_table_dma(pci_seg); uninit_device_table_dma(pci_seg);
for_each_iommu(iommu) for_each_iommu(iommu)
iommu_flush_all_caches(iommu); amd_iommu_flush_all_caches(iommu);
} }
} }
return ret; return ret;


@ -369,6 +369,8 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
bool updated = false; bool updated = false;
u64 __pte, *pte; u64 __pte, *pte;
int ret, i, count; int ret, i, count;
size_t size = pgcount << __ffs(pgsize);
unsigned long o_iova = iova;
BUG_ON(!IS_ALIGNED(iova, pgsize)); BUG_ON(!IS_ALIGNED(iova, pgsize));
BUG_ON(!IS_ALIGNED(paddr, pgsize)); BUG_ON(!IS_ALIGNED(paddr, pgsize));
@ -424,8 +426,7 @@ static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
* Updates and flushing already happened in * Updates and flushing already happened in
* increase_address_space(). * increase_address_space().
*/ */
amd_iommu_domain_flush_tlb_pde(dom); amd_iommu_domain_flush_pages(dom, o_iova, size);
amd_iommu_domain_flush_complete(dom);
spin_unlock_irqrestore(&dom->lock, flags); spin_unlock_irqrestore(&dom->lock, flags);
} }
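
One detail worth spelling out in the hunk above: the flush length passed to
amd_iommu_domain_flush_pages() is computed as pgcount << __ffs(pgsize).
Because pgsize is a power of two here, __ffs(pgsize) equals log2(pgsize),
so the expression is simply pgcount * pgsize in bytes. A small illustrative
helper (name and example values are made up):

#include <linux/bitops.h>

static size_t example_map_len(size_t pgcount, size_t pgsize)
{
        /* e.g. 16 pages of 4 KiB: 16 << 12 == 64 KiB */
        return pgcount << __ffs(pgsize);
}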


@ -244,7 +244,6 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
unsigned long mapped_size = 0; unsigned long mapped_size = 0;
unsigned long o_iova = iova; unsigned long o_iova = iova;
size_t size = pgcount << __ffs(pgsize); size_t size = pgcount << __ffs(pgsize);
int count = 0;
int ret = 0; int ret = 0;
bool updated = false; bool updated = false;
@ -265,19 +264,14 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
*pte = set_pte_attr(paddr, map_size, prot); *pte = set_pte_attr(paddr, map_size, prot);
count++;
iova += map_size; iova += map_size;
paddr += map_size; paddr += map_size;
mapped_size += map_size; mapped_size += map_size;
} }
out: out:
if (updated) { if (updated)
if (count > 1) amd_iommu_domain_flush_pages(pdom, o_iova, size);
amd_iommu_flush_tlb(&pdom->domain, 0);
else
amd_iommu_flush_page(&pdom->domain, 0, o_iova);
}
if (mapped) if (mapped)
*mapped += mapped_size; *mapped += mapped_size;


@ -64,7 +64,7 @@ LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map); LIST_HEAD(acpihid_map);
const struct iommu_ops amd_iommu_ops; const struct iommu_ops amd_iommu_ops;
const struct iommu_dirty_ops amd_dirty_ops; static const struct iommu_dirty_ops amd_dirty_ops;
int amd_iommu_max_glx_val = -1; int amd_iommu_max_glx_val = -1;
@ -85,6 +85,11 @@ static void detach_device(struct device *dev);
* *
****************************************************************************/ ****************************************************************************/
static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
{
return (pdom && (pdom->flags & PD_IOMMUV2_MASK));
}
static inline int get_acpihid_device_id(struct device *dev, static inline int get_acpihid_device_id(struct device *dev,
struct acpihid_map_entry **entry) struct acpihid_map_entry **entry)
{ {
@ -551,8 +556,6 @@ static void amd_iommu_uninit_device(struct device *dev)
if (dev_data->domain) if (dev_data->domain)
detach_device(dev); detach_device(dev);
dev_iommu_priv_set(dev, NULL);
/* /*
* We keep dev_data around for unplugged devices and reuse it when the * We keep dev_data around for unplugged devices and reuse it when the
* device is re-plugged - not doing so would introduce a ton of races. * device is re-plugged - not doing so would introduce a ton of races.
@ -1124,68 +1127,44 @@ static inline u64 build_inv_address(u64 address, size_t size)
} }
static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
size_t size, u16 domid, int pde) size_t size, u16 domid,
ioasid_t pasid, bool gn)
{ {
u64 inv_address = build_inv_address(address, size); u64 inv_address = build_inv_address(address, size);
memset(cmd, 0, sizeof(*cmd)); memset(cmd, 0, sizeof(*cmd));
cmd->data[1] |= domid; cmd->data[1] |= domid;
cmd->data[2] = lower_32_bits(inv_address); cmd->data[2] = lower_32_bits(inv_address);
cmd->data[3] = upper_32_bits(inv_address); cmd->data[3] = upper_32_bits(inv_address);
/* PDE bit - we want to flush everything, not only the PTEs */
cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
if (gn) {
cmd->data[0] |= pasid;
cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
}
CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
} }
static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
u64 address, size_t size) u64 address, size_t size,
ioasid_t pasid, bool gn)
{ {
u64 inv_address = build_inv_address(address, size); u64 inv_address = build_inv_address(address, size);
memset(cmd, 0, sizeof(*cmd)); memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = devid; cmd->data[0] = devid;
cmd->data[0] |= (qdep & 0xff) << 24; cmd->data[0] |= (qdep & 0xff) << 24;
cmd->data[1] = devid; cmd->data[1] = devid;
cmd->data[2] = lower_32_bits(inv_address); cmd->data[2] = lower_32_bits(inv_address);
cmd->data[3] = upper_32_bits(inv_address); cmd->data[3] = upper_32_bits(inv_address);
CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES); if (gn) {
} cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
cmd->data[1] |= (pasid & 0xff) << 16;
cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
}
static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
u64 address, bool size)
{
memset(cmd, 0, sizeof(*cmd));
address &= ~(0xfffULL);
cmd->data[0] = pasid;
cmd->data[1] = domid;
cmd->data[2] = lower_32_bits(address);
cmd->data[3] = upper_32_bits(address);
cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
if (size)
cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
}
static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
int qdep, u64 address, bool size)
{
memset(cmd, 0, sizeof(*cmd));
address &= ~(0xfffULL);
cmd->data[0] = devid;
cmd->data[0] |= ((pasid >> 8) & 0xff) << 16;
cmd->data[0] |= (qdep & 0xff) << 24;
cmd->data[1] = devid;
cmd->data[1] |= (pasid & 0xff) << 16;
cmd->data[2] = lower_32_bits(address);
cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
cmd->data[3] = upper_32_bits(address);
if (size)
cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES); CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
} }
@ -1341,7 +1320,7 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
for (dom_id = 0; dom_id <= last_bdf; ++dom_id) { for (dom_id = 0; dom_id <= last_bdf; ++dom_id) {
struct iommu_cmd cmd; struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
dom_id, 1); dom_id, IOMMU_NO_PASID, false);
iommu_queue_command(iommu, &cmd); iommu_queue_command(iommu, &cmd);
} }
@ -1353,7 +1332,7 @@ static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
struct iommu_cmd cmd; struct iommu_cmd cmd;
build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
dom_id, 1); dom_id, IOMMU_NO_PASID, false);
iommu_queue_command(iommu, &cmd); iommu_queue_command(iommu, &cmd);
iommu_completion_wait(iommu); iommu_completion_wait(iommu);
@ -1392,7 +1371,7 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
iommu_completion_wait(iommu); iommu_completion_wait(iommu);
} }
void iommu_flush_all_caches(struct amd_iommu *iommu) void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
{ {
if (check_feature(FEATURE_IA)) { if (check_feature(FEATURE_IA)) {
amd_iommu_flush_all(iommu); amd_iommu_flush_all(iommu);
@ -1406,8 +1385,8 @@ void iommu_flush_all_caches(struct amd_iommu *iommu)
/* /*
* Command send function for flushing on-device TLB * Command send function for flushing on-device TLB
*/ */
static int device_flush_iotlb(struct iommu_dev_data *dev_data, static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
u64 address, size_t size) size_t size, ioasid_t pasid, bool gn)
{ {
struct amd_iommu *iommu; struct amd_iommu *iommu;
struct iommu_cmd cmd; struct iommu_cmd cmd;
@ -1418,7 +1397,8 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
if (!iommu) if (!iommu)
return -EINVAL; return -EINVAL;
build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size); build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address,
size, pasid, gn);
return iommu_queue_command(iommu, &cmd); return iommu_queue_command(iommu, &cmd);
} }
@ -1464,8 +1444,11 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
return ret; return ret;
} }
if (dev_data->ats_enabled) if (dev_data->ats_enabled) {
ret = device_flush_iotlb(dev_data, 0, ~0UL); /* Invalidate the entire contents of an IOTLB */
ret = device_flush_iotlb(dev_data, 0, ~0UL,
IOMMU_NO_PASID, false);
}
return ret; return ret;
} }
@ -1476,13 +1459,18 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
* page. Otherwise it flushes the whole TLB of the IOMMU. * page. Otherwise it flushes the whole TLB of the IOMMU.
*/ */
static void __domain_flush_pages(struct protection_domain *domain, static void __domain_flush_pages(struct protection_domain *domain,
u64 address, size_t size, int pde) u64 address, size_t size)
{ {
struct iommu_dev_data *dev_data; struct iommu_dev_data *dev_data;
struct iommu_cmd cmd; struct iommu_cmd cmd;
int ret = 0, i; int ret = 0, i;
ioasid_t pasid = IOMMU_NO_PASID;
bool gn = false;
build_inv_iommu_pages(&cmd, address, size, domain->id, pde); if (pdom_is_v2_pgtbl_mode(domain))
gn = true;
build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, gn);
for (i = 0; i < amd_iommu_get_num_iommus(); ++i) { for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
if (!domain->dev_iommu[i]) if (!domain->dev_iommu[i])
@ -1500,17 +1488,21 @@ static void __domain_flush_pages(struct protection_domain *domain,
if (!dev_data->ats_enabled) if (!dev_data->ats_enabled)
continue; continue;
ret |= device_flush_iotlb(dev_data, address, size); ret |= device_flush_iotlb(dev_data, address, size, pasid, gn);
} }
WARN_ON(ret); WARN_ON(ret);
} }
static void domain_flush_pages(struct protection_domain *domain, void amd_iommu_domain_flush_pages(struct protection_domain *domain,
u64 address, size_t size, int pde) u64 address, size_t size)
{ {
if (likely(!amd_iommu_np_cache)) { if (likely(!amd_iommu_np_cache)) {
__domain_flush_pages(domain, address, size, pde); __domain_flush_pages(domain, address, size);
/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
amd_iommu_domain_flush_complete(domain);
return; return;
} }
@ -1543,16 +1535,20 @@ static void domain_flush_pages(struct protection_domain *domain,
flush_size = 1ul << min_alignment; flush_size = 1ul << min_alignment;
__domain_flush_pages(domain, address, flush_size, pde); __domain_flush_pages(domain, address, flush_size);
address += flush_size; address += flush_size;
size -= flush_size; size -= flush_size;
} }
/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
amd_iommu_domain_flush_complete(domain);
} }
/* Flush the whole IO/TLB for a given protection domain - including PDE */ /* Flush the whole IO/TLB for a given protection domain - including PDE */
void amd_iommu_domain_flush_tlb_pde(struct protection_domain *domain) static void amd_iommu_domain_flush_all(struct protection_domain *domain)
{ {
domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); amd_iommu_domain_flush_pages(domain, 0,
CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
} }
void amd_iommu_domain_flush_complete(struct protection_domain *domain) void amd_iommu_domain_flush_complete(struct protection_domain *domain)
@ -1579,8 +1575,7 @@ static void domain_flush_np_cache(struct protection_domain *domain,
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&domain->lock, flags); spin_lock_irqsave(&domain->lock, flags);
domain_flush_pages(domain, iova, size, 1); amd_iommu_domain_flush_pages(domain, iova, size);
amd_iommu_domain_flush_complete(domain);
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&domain->lock, flags);
} }
} }
@ -1858,11 +1853,8 @@ static void do_detach(struct iommu_dev_data *dev_data)
/* Flush the DTE entry */ /* Flush the DTE entry */
device_flush_dte(dev_data); device_flush_dte(dev_data);
/* Flush IOTLB */ /* Flush IOTLB and wait for the flushes to finish */
amd_iommu_domain_flush_tlb_pde(domain); amd_iommu_domain_flush_all(domain);
/* Wait for the flushes to finish */
amd_iommu_domain_flush_complete(domain);
/* decrease reference counters - needs to happen after the flushes */ /* decrease reference counters - needs to happen after the flushes */
domain->dev_iommu[iommu->index] -= 1; domain->dev_iommu[iommu->index] -= 1;
@ -1896,15 +1888,6 @@ static int attach_device(struct device *dev,
do_attach(dev_data, domain); do_attach(dev_data, domain);
/*
* We might boot into a crash-kernel here. The crashed kernel
* left the caches in the IOMMU dirty. So we have to flush
* here to evict all dirty stuff.
*/
amd_iommu_domain_flush_tlb_pde(domain);
amd_iommu_domain_flush_complete(domain);
out: out:
spin_unlock(&dev_data->lock); spin_unlock(&dev_data->lock);
@ -2048,8 +2031,7 @@ void amd_iommu_domain_update(struct protection_domain *domain)
amd_iommu_update_and_flush_device_table(domain); amd_iommu_update_and_flush_device_table(domain);
/* Flush domain TLB(s) and wait for completion */ /* Flush domain TLB(s) and wait for completion */
amd_iommu_domain_flush_tlb_pde(domain); amd_iommu_domain_flush_all(domain);
amd_iommu_domain_flush_complete(domain);
} }
/***************************************************************************** /*****************************************************************************
@ -2482,10 +2464,9 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
} }
/* Flush IOTLB to mark IOPTE dirty on the next translation(s) */ /* Flush IOTLB to mark IOPTE dirty on the next translation(s) */
if (domain_flush) { if (domain_flush)
amd_iommu_domain_flush_tlb_pde(pdomain); amd_iommu_domain_flush_all(pdomain);
amd_iommu_domain_flush_complete(pdomain);
}
pdomain->dirty_tracking = enable; pdomain->dirty_tracking = enable;
spin_unlock_irqrestore(&pdomain->lock, flags); spin_unlock_irqrestore(&pdomain->lock, flags);
@ -2588,8 +2569,7 @@ static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&dom->lock, flags); spin_lock_irqsave(&dom->lock, flags);
amd_iommu_domain_flush_tlb_pde(dom); amd_iommu_domain_flush_all(dom);
amd_iommu_domain_flush_complete(dom);
spin_unlock_irqrestore(&dom->lock, flags); spin_unlock_irqrestore(&dom->lock, flags);
} }
@ -2600,8 +2580,8 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&dom->lock, flags); spin_lock_irqsave(&dom->lock, flags);
domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1); amd_iommu_domain_flush_pages(dom, gather->start,
amd_iommu_domain_flush_complete(dom); gather->end - gather->start + 1);
spin_unlock_irqrestore(&dom->lock, flags); spin_unlock_irqrestore(&dom->lock, flags);
} }
@ -2635,7 +2615,7 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
return true; return true;
} }
const struct iommu_dirty_ops amd_dirty_ops = { static const struct iommu_dirty_ops amd_dirty_ops = {
.set_dirty_tracking = amd_iommu_set_dirty_tracking, .set_dirty_tracking = amd_iommu_set_dirty_tracking,
.read_and_clear_dirty = amd_iommu_read_and_clear_dirty, .read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
}; };
@ -2666,7 +2646,7 @@ const struct iommu_ops amd_iommu_ops = {
}; };
static int __flush_pasid(struct protection_domain *domain, u32 pasid, static int __flush_pasid(struct protection_domain *domain, u32 pasid,
u64 address, bool size) u64 address, size_t size)
{ {
struct iommu_dev_data *dev_data; struct iommu_dev_data *dev_data;
struct iommu_cmd cmd; struct iommu_cmd cmd;
@ -2675,7 +2655,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
if (!(domain->flags & PD_IOMMUV2_MASK)) if (!(domain->flags & PD_IOMMUV2_MASK))
return -EINVAL; return -EINVAL;
build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size); build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true);
/* /*
* IOMMU TLB needs to be flushed before Device TLB to * IOMMU TLB needs to be flushed before Device TLB to
@ -2709,8 +2689,8 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
iommu = rlookup_amd_iommu(dev_data->dev); iommu = rlookup_amd_iommu(dev_data->dev);
if (!iommu) if (!iommu)
continue; continue;
build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid, build_inv_iotlb_pages(&cmd, dev_data->devid, qdep,
qdep, address, size); address, size, pasid, true);
ret = iommu_queue_command(iommu, &cmd); ret = iommu_queue_command(iommu, &cmd);
if (ret != 0) if (ret != 0)
@ -2730,7 +2710,7 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid, static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
u64 address) u64 address)
{ {
return __flush_pasid(domain, pasid, address, false); return __flush_pasid(domain, pasid, address, PAGE_SIZE);
} }
int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
@ -2749,8 +2729,7 @@ int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid) static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
{ {
return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, return __flush_pasid(domain, pasid, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
true);
} }
int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid) int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
@ -3111,8 +3090,8 @@ static int alloc_irq_index(struct amd_iommu *iommu, u16 devid, int count,
return index; return index;
} }
static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index, static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
struct irte_ga *irte) struct irte_ga *irte)
{ {
struct irq_remap_table *table; struct irq_remap_table *table;
struct irte_ga *entry; struct irte_ga *entry;
@ -3139,6 +3118,18 @@ static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
raw_spin_unlock_irqrestore(&table->lock, flags); raw_spin_unlock_irqrestore(&table->lock, flags);
return 0;
}
static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
struct irte_ga *irte)
{
bool ret;
ret = __modify_irte_ga(iommu, devid, index, irte);
if (ret)
return ret;
iommu_flush_irt_and_complete(iommu, devid); iommu_flush_irt_and_complete(iommu, devid);
return 0; return 0;
@ -3822,8 +3813,8 @@ int amd_iommu_update_ga(int cpu, bool is_run, void *data)
} }
entry->lo.fields_vapic.is_run = is_run; entry->lo.fields_vapic.is_run = is_run;
return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid, return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry); ir_data->irq_2_irte.index, entry);
} }
EXPORT_SYMBOL(amd_iommu_update_ga); EXPORT_SYMBOL(amd_iommu_update_ga);
#endif #endif
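
The amd/iommu.c hunk above folds the separate PASID and non-PASID
invalidation builders into build_inv_iommu_pages()/build_inv_iotlb_pages()
and gives callers one range-flush entry point that also waits for
completion. A kernel-context sketch of a caller after this change,
mirroring the domain_flush_np_cache()/iotlb_sync paths in the hunk (the
wrapper function is illustrative and assumes the AMD driver's internal
headers for struct protection_domain):

#include <linux/spinlock.h>

static void example_flush_range(struct protection_domain *dom,
                                u64 iova, size_t size)
{
        unsigned long flags;

        spin_lock_irqsave(&dom->lock, flags);
        /*
         * Issues the IOMMU/IOTLB invalidations and waits for completion,
         * so no separate amd_iommu_domain_flush_complete() call is needed.
         */
        amd_iommu_domain_flush_pages(dom, iova, size);
        spin_unlock_irqrestore(&dom->lock, flags);
}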


@ -81,6 +81,7 @@
#define DART_T8020_TCR_BYPASS_DAPF BIT(12) #define DART_T8020_TCR_BYPASS_DAPF BIT(12)
#define DART_T8020_TTBR 0x200 #define DART_T8020_TTBR 0x200
#define DART_T8020_USB4_TTBR 0x400
#define DART_T8020_TTBR_VALID BIT(31) #define DART_T8020_TTBR_VALID BIT(31)
#define DART_T8020_TTBR_ADDR_FIELD_SHIFT 0 #define DART_T8020_TTBR_ADDR_FIELD_SHIFT 0
#define DART_T8020_TTBR_SHIFT 12 #define DART_T8020_TTBR_SHIFT 12
@ -368,12 +369,14 @@ apple_dart_t8020_hw_stream_command(struct apple_dart_stream_map *stream_map,
u32 command) u32 command)
{ {
unsigned long flags; unsigned long flags;
int ret; int ret, i;
u32 command_reg; u32 command_reg;
spin_lock_irqsave(&stream_map->dart->lock, flags); spin_lock_irqsave(&stream_map->dart->lock, flags);
writel(stream_map->sidmap[0], stream_map->dart->regs + DART_T8020_STREAM_SELECT); for (i = 0; i < BITS_TO_U32(stream_map->dart->num_streams); i++)
writel(stream_map->sidmap[i],
stream_map->dart->regs + DART_T8020_STREAM_SELECT + 4 * i);
writel(command, stream_map->dart->regs + DART_T8020_STREAM_COMMAND); writel(command, stream_map->dart->regs + DART_T8020_STREAM_COMMAND);
ret = readl_poll_timeout_atomic( ret = readl_poll_timeout_atomic(
@ -740,7 +743,6 @@ static void apple_dart_release_device(struct device *dev)
{ {
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
dev_iommu_priv_set(dev, NULL);
kfree(cfg); kfree(cfg);
} }
@ -908,7 +910,7 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
ret = apple_dart_merge_master_cfg(group_master_cfg, cfg); ret = apple_dart_merge_master_cfg(group_master_cfg, cfg);
if (ret) { if (ret) {
dev_err(dev, "Failed to merge DART IOMMU grups.\n"); dev_err(dev, "Failed to merge DART IOMMU groups.\n");
iommu_group_put(group); iommu_group_put(group);
res = ERR_PTR(ret); res = ERR_PTR(ret);
goto out; goto out;
@ -1215,6 +1217,33 @@ static const struct apple_dart_hw apple_dart_hw_t8103 = {
.ttbr_shift = DART_T8020_TTBR_SHIFT, .ttbr_shift = DART_T8020_TTBR_SHIFT,
.ttbr_count = 4, .ttbr_count = 4,
}; };
static const struct apple_dart_hw apple_dart_hw_t8103_usb4 = {
.type = DART_T8020,
.irq_handler = apple_dart_t8020_irq,
.invalidate_tlb = apple_dart_t8020_hw_invalidate_tlb,
.oas = 36,
.fmt = APPLE_DART,
.max_sid_count = 64,
.enable_streams = DART_T8020_STREAMS_ENABLE,
.lock = DART_T8020_CONFIG,
.lock_bit = DART_T8020_CONFIG_LOCK,
.error = DART_T8020_ERROR,
.tcr = DART_T8020_TCR,
.tcr_enabled = DART_T8020_TCR_TRANSLATE_ENABLE,
.tcr_disabled = 0,
.tcr_bypass = 0,
.ttbr = DART_T8020_USB4_TTBR,
.ttbr_valid = DART_T8020_TTBR_VALID,
.ttbr_addr_field_shift = DART_T8020_TTBR_ADDR_FIELD_SHIFT,
.ttbr_shift = DART_T8020_TTBR_SHIFT,
.ttbr_count = 4,
};
static const struct apple_dart_hw apple_dart_hw_t6000 = { static const struct apple_dart_hw apple_dart_hw_t6000 = {
.type = DART_T6000, .type = DART_T6000,
.irq_handler = apple_dart_t8020_irq, .irq_handler = apple_dart_t8020_irq,
@ -1272,7 +1301,7 @@ static __maybe_unused int apple_dart_suspend(struct device *dev)
unsigned int sid, idx; unsigned int sid, idx;
for (sid = 0; sid < dart->num_streams; sid++) { for (sid = 0; sid < dart->num_streams; sid++) {
dart->save_tcr[sid] = readl_relaxed(dart->regs + DART_TCR(dart, sid)); dart->save_tcr[sid] = readl(dart->regs + DART_TCR(dart, sid));
for (idx = 0; idx < dart->hw->ttbr_count; idx++) for (idx = 0; idx < dart->hw->ttbr_count; idx++)
dart->save_ttbr[sid][idx] = dart->save_ttbr[sid][idx] =
readl(dart->regs + DART_TTBR(dart, sid, idx)); readl(dart->regs + DART_TTBR(dart, sid, idx));
@ -1307,6 +1336,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dar
static const struct of_device_id apple_dart_of_match[] = { static const struct of_device_id apple_dart_of_match[] = {
{ .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 }, { .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
{ .compatible = "apple,t8103-usb4-dart", .data = &apple_dart_hw_t8103_usb4 },
{ .compatible = "apple,t8110-dart", .data = &apple_dart_hw_t8110 }, { .compatible = "apple,t8110-dart", .data = &apple_dart_hw_t8110 },
{ .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 }, { .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 },
{}, {},


@ -246,7 +246,8 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
smmu_domain); smmu_domain);
} }
arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size); arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), start,
size);
} }
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@ -264,10 +265,11 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
* DMA may still be running. Keep the cd valid to avoid C_BAD_CD events, * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
* but disable translation. * but disable translation.
*/ */
arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, &quiet_cd); arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
&quiet_cd);
arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid); arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0); arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
smmu_mn->cleared = true; smmu_mn->cleared = true;
mutex_unlock(&sva_lock); mutex_unlock(&sva_lock);
@ -325,10 +327,13 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
spin_lock_irqsave(&smmu_domain->devices_lock, flags); spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_for_each_entry(master, &smmu_domain->devices, domain_head) { list_for_each_entry(master, &smmu_domain->devices, domain_head) {
ret = arm_smmu_write_ctx_desc(master, mm->pasid, cd); ret = arm_smmu_write_ctx_desc(master, mm_get_enqcmd_pasid(mm),
cd);
if (ret) { if (ret) {
list_for_each_entry_from_reverse(master, &smmu_domain->devices, domain_head) list_for_each_entry_from_reverse(
arm_smmu_write_ctx_desc(master, mm->pasid, NULL); master, &smmu_domain->devices, domain_head)
arm_smmu_write_ctx_desc(
master, mm_get_enqcmd_pasid(mm), NULL);
break; break;
} }
} }
@ -358,7 +363,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
list_del(&smmu_mn->list); list_del(&smmu_mn->list);
arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, NULL); arm_smmu_update_ctx_desc_devices(smmu_domain, mm_get_enqcmd_pasid(mm),
NULL);
/* /*
* If we went through clear(), we've already invalidated, and no * If we went through clear(), we've already invalidated, and no
@ -366,7 +372,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
*/ */
if (!smmu_mn->cleared) { if (!smmu_mn->cleared) {
arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid); arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0); arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0,
0);
} }
/* Frees smmu_mn */ /* Frees smmu_mn */


@ -1063,6 +1063,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
bool cd_live; bool cd_live;
__le64 *cdptr; __le64 *cdptr;
struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table; struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
struct arm_smmu_device *smmu = master->smmu;
if (WARN_ON(ssid >= (1 << cd_table->s1cdmax))) if (WARN_ON(ssid >= (1 << cd_table->s1cdmax)))
return -E2BIG; return -E2BIG;
@ -1077,6 +1078,8 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
if (!cd) { /* (5) */ if (!cd) { /* (5) */
val = 0; val = 0;
} else if (cd == &quiet_cd) { /* (4) */ } else if (cd == &quiet_cd) { /* (4) */
if (!(smmu->features & ARM_SMMU_FEAT_STALL_FORCE))
val &= ~(CTXDESC_CD_0_S | CTXDESC_CD_0_R);
val |= CTXDESC_CD_0_TCR_EPD0; val |= CTXDESC_CD_0_TCR_EPD0;
} else if (cd_live) { /* (3) */ } else if (cd_live) { /* (3) */
val &= ~CTXDESC_CD_0_ASID; val &= ~CTXDESC_CD_0_ASID;
@ -1249,7 +1252,7 @@ static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
} }
static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
__le64 *dst) struct arm_smmu_ste *dst)
{ {
/* /*
* This is hideously complicated, but we only really care about * This is hideously complicated, but we only really care about
@ -1267,12 +1270,12 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
* 2. Write everything apart from dword 0, sync, write dword 0, sync * 2. Write everything apart from dword 0, sync, write dword 0, sync
* 3. Update Config, sync * 3. Update Config, sync
*/ */
u64 val = le64_to_cpu(dst[0]); u64 val = le64_to_cpu(dst->data[0]);
bool ste_live = false; bool ste_live = false;
struct arm_smmu_device *smmu = NULL; struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_ctx_desc_cfg *cd_table = NULL; struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
struct arm_smmu_s2_cfg *s2_cfg = NULL; struct arm_smmu_s2_cfg *s2_cfg = NULL;
struct arm_smmu_domain *smmu_domain = NULL; struct arm_smmu_domain *smmu_domain = master->domain;
struct arm_smmu_cmdq_ent prefetch_cmd = { struct arm_smmu_cmdq_ent prefetch_cmd = {
.opcode = CMDQ_OP_PREFETCH_CFG, .opcode = CMDQ_OP_PREFETCH_CFG,
.prefetch = { .prefetch = {
@ -1280,18 +1283,12 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
}, },
}; };
if (master) {
smmu_domain = master->domain;
smmu = master->smmu;
}
if (smmu_domain) { if (smmu_domain) {
switch (smmu_domain->stage) { switch (smmu_domain->stage) {
case ARM_SMMU_DOMAIN_S1: case ARM_SMMU_DOMAIN_S1:
cd_table = &master->cd_table; cd_table = &master->cd_table;
break; break;
case ARM_SMMU_DOMAIN_S2: case ARM_SMMU_DOMAIN_S2:
case ARM_SMMU_DOMAIN_NESTED:
s2_cfg = &smmu_domain->s2_cfg; s2_cfg = &smmu_domain->s2_cfg;
break; break;
default: default:
@ -1325,10 +1322,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
else else
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS); val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
dst[0] = cpu_to_le64(val); dst->data[0] = cpu_to_le64(val);
dst[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, dst->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
STRTAB_STE_1_SHCFG_INCOMING)); STRTAB_STE_1_SHCFG_INCOMING));
dst[2] = 0; /* Nuke the VMID */ dst->data[2] = 0; /* Nuke the VMID */
/* /*
* The SMMU can perform negative caching, so we must sync * The SMMU can perform negative caching, so we must sync
* the STE regardless of whether the old value was live. * the STE regardless of whether the old value was live.
@ -1343,7 +1340,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1; STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
BUG_ON(ste_live); BUG_ON(ste_live);
dst[1] = cpu_to_le64( dst->data[1] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) | FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) | FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
@ -1352,7 +1349,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
if (smmu->features & ARM_SMMU_FEAT_STALLS &&
!master->stall_enabled)
- dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
+ dst->data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
@@ -1362,7 +1359,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
if (s2_cfg) {
BUG_ON(ste_live);
- dst[2] = cpu_to_le64(
+ dst->data[2] = cpu_to_le64(
FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) |
FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) |
#ifdef __BIG_ENDIAN
@@ -1371,18 +1368,18 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
STRTAB_STE_2_S2R);
- dst[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
+ dst->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK);
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS);
}
if (master->ats_enabled)
- dst[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
+ dst->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS,
STRTAB_STE_1_EATS_TRANS));
arm_smmu_sync_ste_for_sid(smmu, sid);
/* See comment in arm_smmu_write_ctx_desc() */
- WRITE_ONCE(dst[0], cpu_to_le64(val));
+ WRITE_ONCE(dst->data[0], cpu_to_le64(val));
arm_smmu_sync_ste_for_sid(smmu, sid);
/* It's likely that we'll want to use the new STE soon */
@@ -1390,7 +1387,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}
- static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
+ static void arm_smmu_init_bypass_stes(struct arm_smmu_ste *strtab,
+ unsigned int nent, bool force)
{
unsigned int i;
u64 val = STRTAB_STE_0_V;
@@ -1401,11 +1399,11 @@ static void arm_smmu_init_bypass_stes(__le64 *strtab, unsigned int nent, bool force)
val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
for (i = 0; i < nent; ++i) {
- strtab[0] = cpu_to_le64(val);
- strtab[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
- STRTAB_STE_1_SHCFG_INCOMING));
- strtab[2] = 0;
- strtab += STRTAB_STE_DWORDS;
+ strtab->data[0] = cpu_to_le64(val);
+ strtab->data[1] = cpu_to_le64(FIELD_PREP(
+ STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
+ strtab->data[2] = 0;
+ strtab++;
}
}
@@ -2171,7 +2169,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
fmt = ARM_64_LPAE_S1;
finalise_stage_fn = arm_smmu_domain_finalise_s1;
break;
- case ARM_SMMU_DOMAIN_NESTED:
case ARM_SMMU_DOMAIN_S2:
ias = smmu->ias;
oas = smmu->oas;
@@ -2209,26 +2206,23 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
return 0;
}
- static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
+ static struct arm_smmu_ste *
+ arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
- __le64 *step;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
- struct arm_smmu_strtab_l1_desc *l1_desc;
- int idx;
+ unsigned int idx1, idx2;
/* Two-level walk */
- idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
- l1_desc = &cfg->l1_desc[idx];
- idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
- step = &l1_desc->l2ptr[idx];
+ idx1 = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
+ idx2 = sid & ((1 << STRTAB_SPLIT) - 1);
+ return &cfg->l1_desc[idx1].l2ptr[idx2];
} else {
/* Simple linear lookup */
- step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
+ return (struct arm_smmu_ste *)&cfg
+ ->strtab[sid * STRTAB_STE_DWORDS];
}
- return step;
}
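The two-level lookup above splits a StreamID into an L1 descriptor index and an STE index within that descriptor's L2 table. A minimal user-space sketch of the arithmetic follows; the split width and the L1 descriptor size in dwords are assumed example values (8 and 1), not taken from the driver.

/*
 * Illustrative only: strtab_split and l1_desc_dwords are assumed values,
 * mirroring the index math in arm_smmu_get_step_for_sid() above.
 */
#include <stdio.h>

int main(void)
{
	unsigned int strtab_split = 8;
	unsigned int l1_desc_dwords = 1;
	unsigned int sid = 0x1234;	/* example StreamID */

	unsigned int idx1 = (sid >> strtab_split) * l1_desc_dwords;
	unsigned int idx2 = sid & ((1u << strtab_split) - 1);

	/* 0x1234 -> L1 descriptor 0x12, STE 0x34 within its L2 table */
	printf("sid 0x%x -> l1_desc[0x%x], l2ptr[0x%x]\n", sid, idx1, idx2);
	return 0;
}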
static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
@@ -2238,7 +2232,8 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master)
for (i = 0; i < master->num_streams; ++i) {
u32 sid = master->streams[i].id;
- __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
+ struct arm_smmu_ste *step =
+ arm_smmu_get_step_for_sid(smmu, sid);
/* Bridged PCI devices may end up with duplicated IDs */
for (j = 0; j < i; j++)
@@ -2649,9 +2644,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
struct arm_smmu_master *master;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- if (!fwspec || fwspec->ops != &arm_smmu_ops)
- return ERR_PTR(-ENODEV);
if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
return ERR_PTR(-EBUSY);
@@ -2698,7 +2690,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
err_free_master:
kfree(master);
- dev_iommu_priv_set(dev, NULL);
return ERR_PTR(ret);
}
@@ -2742,7 +2733,7 @@ static int arm_smmu_enable_nesting(struct iommu_domain *domain)
if (smmu_domain->smmu)
ret = -EPERM;
else
- smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
mutex_unlock(&smmu_domain->init_mutex);
return ret;
@@ -3769,7 +3760,7 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
list_for_each_entry(e, &rmr_list, list) {
- __le64 *step;
+ struct arm_smmu_ste *step;
struct iommu_iort_rmr_data *rmr;
int ret, i;


@@ -206,6 +206,11 @@
#define STRTAB_L1_DESC_L2PTR_MASK GENMASK_ULL(51, 6)
#define STRTAB_STE_DWORDS 8
+ struct arm_smmu_ste {
+ __le64 data[STRTAB_STE_DWORDS];
+ };
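This wrapper type is the theme of the SMMUv3 refactoring in this pull: stream table entries stop being passed around as bare __le64 pointers, indexing goes through ->data[], and pointer arithmetic advances a whole entry at a time (strtab++ rather than strtab += STRTAB_STE_DWORDS). A stand-alone sketch of the pattern with simplified types; write_ste() is a hypothetical helper invented for the example, not a driver function.

#include <stdint.h>
#include <stdio.h>

#define STE_DWORDS 8

struct ste {
	uint64_t data[STE_DWORDS];	/* mirrors struct arm_smmu_ste */
};

/* Hypothetical helper: set word 0 of an entry and mark it valid (bit 0). */
static void write_ste(struct ste *dst, uint64_t val)
{
	dst->data[0] = val | 1;
}

int main(void)
{
	static struct ste table[4];	/* zero-initialized stream table */
	struct ste *p = table;
	unsigned int i;

	for (i = 0; i < 4; i++, p++)	/* p++ advances one whole entry */
		write_ste(p, (uint64_t)i << 32);

	printf("entry 2, word 0 = 0x%llx\n",
	       (unsigned long long)table[2].data[0]);
	return 0;
}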
#define STRTAB_STE_0_V (1UL << 0)
#define STRTAB_STE_0_CFG GENMASK_ULL(3, 1)
#define STRTAB_STE_0_CFG_ABORT 0
@@ -571,7 +576,7 @@ struct arm_smmu_priq {
struct arm_smmu_strtab_l1_desc {
u8 span;
- __le64 *l2ptr;
+ struct arm_smmu_ste *l2ptr;
dma_addr_t l2ptr_dma;
};
@@ -710,7 +715,6 @@ struct arm_smmu_master {
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
- ARM_SMMU_DOMAIN_NESTED,
ARM_SMMU_DOMAIN_BYPASS,
};


@@ -243,8 +243,10 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,adreno" },
+ { .compatible = "qcom,adreno-gmu" },
{ .compatible = "qcom,mdp4" },
{ .compatible = "qcom,mdss" },
+ { .compatible = "qcom,qcm2290-mdss" },
{ .compatible = "qcom,sc7180-mdss" },
{ .compatible = "qcom,sc7180-mss-pil" },
{ .compatible = "qcom,sc7280-mdss" },


@@ -82,6 +82,23 @@ static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
pm_runtime_put_autosuspend(smmu->dev);
}
static void arm_smmu_rpm_use_autosuspend(struct arm_smmu_device *smmu)
{
/*
* Setup an autosuspend delay to avoid bouncing runpm state.
* Otherwise, if a driver for a suspended consumer device
* unmaps buffers, it will runpm resume/suspend for each one.
*
* For example, when used by a GPU device, when an application
* or game exits, it can trigger unmapping 100s or 1000s of
* buffers. With a runpm cycle for each buffer, that adds up
* to 5-10sec worth of reprogramming the context bank, while
* the system appears to be locked up to the user.
*/
pm_runtime_set_autosuspend_delay(smmu->dev, 20);
pm_runtime_use_autosuspend(smmu->dev);
}
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
@@ -392,8 +409,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
u32 fsr, fsynr, cbfrsynra;
unsigned long iova;
- struct iommu_domain *domain = dev;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct arm_smmu_domain *smmu_domain = dev;
struct arm_smmu_device *smmu = smmu_domain->smmu;
int idx = smmu_domain->cfg.cbndx;
int ret;
@@ -406,7 +422,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
- ret = report_iommu_fault(domain, NULL, iova,
+ ret = report_iommu_fault(&smmu_domain->domain, NULL, iova,
fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
if (ret == -ENOSYS)
@@ -607,7 +623,7 @@ static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain,
return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
}
- static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+ static int arm_smmu_init_domain_context(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_device *smmu,
struct device *dev)
{
@@ -616,7 +632,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct io_pgtable_ops *pgtbl_ops;
struct io_pgtable_cfg pgtbl_cfg;
enum io_pgtable_fmt fmt;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ struct iommu_domain *domain = &smmu_domain->domain;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
irqreturn_t (*context_fault)(int irq, void *dev);
@@ -624,12 +640,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;
- if (domain->type == IOMMU_DOMAIN_IDENTITY) {
- smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
- smmu_domain->smmu = smmu;
- goto out_unlock;
- }
/*
* Mapping the requested stage onto what we support is surprisingly
* complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -796,8 +806,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
else
context_fault = arm_smmu_context_fault;
- ret = devm_request_irq(smmu->dev, irq, context_fault,
- IRQF_SHARED, "arm-smmu-context-fault", domain);
+ ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
+ "arm-smmu-context-fault", smmu_domain);
if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
cfg->irptndx, irq);
@@ -818,14 +828,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
return ret;
}
- static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain)
{
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
int ret, irq;
- if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
+ if (!smmu)
return;
ret = arm_smmu_rpm_get(smmu);
@@ -841,7 +850,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
irq = smmu->irqs[cfg->irptndx];
- devm_free_irq(smmu->dev, irq, domain);
+ devm_free_irq(smmu->dev, irq, smmu_domain);
}
free_io_pgtable_ops(smmu_domain->pgtbl_ops);
@@ -850,14 +859,10 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
arm_smmu_rpm_put(smmu);
}
- static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
+ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
{
struct arm_smmu_domain *smmu_domain;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_IDENTITY) {
- if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
- return NULL;
- }
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@@ -870,6 +875,15 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->cb_lock);
+ if (dev) {
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+ if (arm_smmu_init_domain_context(smmu_domain, cfg->smmu, dev)) {
+ kfree(smmu_domain);
+ return NULL;
+ }
+ }
return &smmu_domain->domain;
}
@@ -881,7 +895,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
* Free the domain resources. We assume that all devices have
* already been detached.
*/
- arm_smmu_destroy_domain_context(domain);
+ arm_smmu_destroy_domain_context(smmu_domain);
kfree(smmu_domain);
}
@@ -1081,21 +1095,14 @@ static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
mutex_unlock(&smmu->stream_map_mutex);
}
- static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
- struct arm_smmu_master_cfg *cfg,
- struct iommu_fwspec *fwspec)
+ static void arm_smmu_master_install_s2crs(struct arm_smmu_master_cfg *cfg,
+ enum arm_smmu_s2cr_type type,
+ u8 cbndx, struct iommu_fwspec *fwspec)
{
- struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct arm_smmu_device *smmu = cfg->smmu;
struct arm_smmu_s2cr *s2cr = smmu->s2crs;
- u8 cbndx = smmu_domain->cfg.cbndx;
- enum arm_smmu_s2cr_type type;
int i, idx;
- if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
- type = S2CR_TYPE_BYPASS;
- else
- type = S2CR_TYPE_TRANS;
for_each_cfg_sme(cfg, fwspec, i, idx) {
if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
continue;
@@ -1105,7 +1112,6 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
s2cr[idx].cbndx = cbndx;
arm_smmu_write_s2cr(smmu, idx);
}
- return 0;
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1116,11 +1122,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
struct arm_smmu_device *smmu;
int ret;
- if (!fwspec || fwspec->ops != &arm_smmu_ops) {
- dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
- return -ENXIO;
- }
/*
* FIXME: The arch/arm DMA API code tries to attach devices to its own
* domains between of_xlate() and probe_device() - we have no way to cope
@@ -1139,7 +1140,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return ret;
/* Ensure that the domain is finalised */
- ret = arm_smmu_init_domain_context(domain, smmu, dev);
+ ret = arm_smmu_init_domain_context(smmu_domain, smmu, dev);
if (ret < 0)
goto rpm_put;
@@ -1153,27 +1154,66 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
/* Looks ok, so add the device to the domain */
- ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
- /*
- * Setup an autosuspend delay to avoid bouncing runpm state.
- * Otherwise, if a driver for a suspended consumer device
- * unmaps buffers, it will runpm resume/suspend for each one.
- *
- * For example, when used by a GPU device, when an application
- * or game exits, it can trigger unmapping 100s or 1000s of
- * buffers. With a runpm cycle for each buffer, that adds up
- * to 5-10sec worth of reprogramming the context bank, while
- * the system appears to be locked up to the user.
- */
- pm_runtime_set_autosuspend_delay(smmu->dev, 20);
- pm_runtime_use_autosuspend(smmu->dev);
+ arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_TRANS,
+ smmu_domain->cfg.cbndx, fwspec);
+ arm_smmu_rpm_use_autosuspend(smmu);
rpm_put:
arm_smmu_rpm_put(smmu);
return ret;
}
static int arm_smmu_attach_dev_type(struct device *dev,
enum arm_smmu_s2cr_type type)
{
struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_device *smmu;
int ret;
if (!cfg)
return -ENODEV;
smmu = cfg->smmu;
ret = arm_smmu_rpm_get(smmu);
if (ret < 0)
return ret;
arm_smmu_master_install_s2crs(cfg, type, 0, fwspec);
arm_smmu_rpm_use_autosuspend(smmu);
arm_smmu_rpm_put(smmu);
return 0;
}
static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
struct device *dev)
{
return arm_smmu_attach_dev_type(dev, S2CR_TYPE_BYPASS);
}
static const struct iommu_domain_ops arm_smmu_identity_ops = {
.attach_dev = arm_smmu_attach_dev_identity,
};
static struct iommu_domain arm_smmu_identity_domain = {
.type = IOMMU_DOMAIN_IDENTITY,
.ops = &arm_smmu_identity_ops,
};
static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
struct device *dev)
{
return arm_smmu_attach_dev_type(dev, S2CR_TYPE_FAULT);
}
static const struct iommu_domain_ops arm_smmu_blocked_ops = {
.attach_dev = arm_smmu_attach_dev_blocked,
};
static struct iommu_domain arm_smmu_blocked_domain = {
.type = IOMMU_DOMAIN_BLOCKED,
.ops = &arm_smmu_blocked_ops,
};
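The two static domains above let the IOMMU core hand every device the same singleton object for IDENTITY and BLOCKED translation instead of allocating a paging domain per device. Stripped of kernel types, the shape of that pattern looks roughly like the stand-alone sketch below; struct fake_domain, fake_attach_identity() and the device names are invented for illustration and are not kernel APIs.

/* Stand-alone analogy of a static singleton domain with const ops. */
#include <stdio.h>

struct fake_device {
	const char *name;
};

struct fake_domain;

struct fake_domain_ops {
	int (*attach_dev)(struct fake_domain *dom, struct fake_device *dev);
};

struct fake_domain {
	const char *type;
	const struct fake_domain_ops *ops;
};

static int fake_attach_identity(struct fake_domain *dom, struct fake_device *dev)
{
	/* The real driver would program S2CR_TYPE_BYPASS for the device's streams. */
	printf("%s attached to %s domain\n", dev->name, dom->type);
	return 0;
}

static const struct fake_domain_ops identity_ops = {
	.attach_dev = fake_attach_identity,
};

/* One global instance shared by every device, like arm_smmu_identity_domain. */
static struct fake_domain identity_domain = {
	.type = "identity",
	.ops = &identity_ops,
};

int main(void)
{
	struct fake_device a = { "dev-a" }, b = { "dev-b" };

	identity_domain.ops->attach_dev(&identity_domain, &a);
	identity_domain.ops->attach_dev(&identity_domain, &b);
	return 0;
}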
static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -1357,10 +1397,8 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
fwspec = dev_iommu_fwspec_get(dev);
if (ret)
goto out_free;
- } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
- smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
} else {
- return ERR_PTR(-ENODEV);
+ smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
}
ret = -EINVAL;
@@ -1427,7 +1465,6 @@ static void arm_smmu_release_device(struct device *dev)
arm_smmu_rpm_put(cfg->smmu);
- dev_iommu_priv_set(dev, NULL);
kfree(cfg);
}
@@ -1560,8 +1597,10 @@ static int arm_smmu_def_domain_type(struct device *dev)
}
static struct iommu_ops arm_smmu_ops = {
+ .identity_domain = &arm_smmu_identity_domain,
+ .blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
- .domain_alloc = arm_smmu_domain_alloc,
+ .domain_alloc_paging = arm_smmu_domain_alloc_paging,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,
@@ -2161,7 +2200,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return err;
}
- err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev);
+ err = iommu_device_register(&smmu->iommu, &arm_smmu_ops,
+ using_legacy_binding ? NULL : dev);
if (err) {
dev_err(dev, "Failed to register iommu\n");
iommu_device_sysfs_remove(&smmu->iommu);


@@ -361,7 +361,6 @@ enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
ARM_SMMU_DOMAIN_NESTED,
- ARM_SMMU_DOMAIN_BYPASS,
};
struct arm_smmu_domain {


@@ -79,16 +79,6 @@ static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
static const struct iommu_ops qcom_iommu_ops;
- static struct qcom_iommu_dev * to_iommu(struct device *dev)
- {
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- if (!fwspec || fwspec->ops != &qcom_iommu_ops)
- return NULL;
- return dev_iommu_priv_get(dev);
- }
static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
{
struct qcom_iommu_dev *qcom_iommu = d->iommu;
@@ -372,7 +362,7 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+ struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
int ret;
@@ -404,7 +394,7 @@ static int qcom_iommu_identity_attach(struct iommu_domain *identity_domain,
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct qcom_iommu_domain *qcom_domain;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+ struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
unsigned int i;
if (domain == identity_domain || !domain)
@@ -535,7 +525,7 @@ static bool qcom_iommu_capable(struct device *dev, enum iommu_cap cap)
static struct iommu_device *qcom_iommu_probe_device(struct device *dev)
{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+ struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
struct device_link *link;
if (!qcom_iommu)
@@ -900,8 +890,16 @@ static void qcom_iommu_device_remove(struct platform_device *pdev)
static int __maybe_unused qcom_iommu_resume(struct device *dev)
{
struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev);
+ int ret;
- return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
+ ret = clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks);
+ if (ret < 0)
+ return ret;
+ if (dev->pm_domain)
+ return qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, 0);
+ return ret;
}
static int __maybe_unused qcom_iommu_suspend(struct device *dev)


@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
+ #include <trace/events/swiotlb.h>
#include "dma-iommu.h"
@@ -1156,6 +1157,8 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
return DMA_MAPPING_ERROR;
}
+ trace_swiotlb_bounced(dev, phys, size);
aligned_size = iova_align(iovad, size);
phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
iova_mask(iovad), dir, attrs);


@@ -106,9 +106,6 @@ static const struct iommu_regset iommu_regs_64[] = {
IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8),
IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9),
IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9),
- IOMMU_REGSET_ENTRY(VCCAP),
- IOMMU_REGSET_ENTRY(VCMD),
- IOMMU_REGSET_ENTRY(VCRSP),
};
static struct dentry *intel_iommu_debug;


@@ -46,9 +46,6 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
- #define MAX_AGAW_WIDTH 64
- #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
@@ -63,74 +60,6 @@
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
/* page table handling */
#define LEVEL_STRIDE (9)
#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
static inline int agaw_to_level(int agaw)
{
return agaw + 2;
}
static inline int agaw_to_width(int agaw)
{
return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}
static inline int width_to_agaw(int width)
{
return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
static inline unsigned int level_to_offset_bits(int level)
{
return (level - 1) * LEVEL_STRIDE;
}
static inline int pfn_level_offset(u64 pfn, int level)
{
return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}
static inline u64 level_mask(int level)
{
return -1ULL << level_to_offset_bits(level);
}
static inline u64 level_size(int level)
{
return 1ULL << level_to_offset_bits(level);
}
static inline u64 align_to_level(u64 pfn, int level)
{
return (pfn + level_size(level) - 1) & level_mask(level);
}
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
{
return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
{
return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
return mm_to_dma_pfn_start(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
return page_to_dma_pfn(virt_to_page(p));
}
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
@@ -168,78 +97,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}
static inline void context_set_present(struct context_entry *context)
{
context->lo |= 1;
}
static inline void context_set_fault_enable(struct context_entry *context)
{
context->lo &= (((u64)-1) << 2) | 1;
}
static inline void context_set_translation_type(struct context_entry *context,
unsigned long value)
{
context->lo &= (((u64)-1) << 4) | 3;
context->lo |= (value & 3) << 2;
}
static inline void context_set_address_root(struct context_entry *context,
unsigned long value)
{
context->lo &= ~VTD_PAGE_MASK;
context->lo |= value & VTD_PAGE_MASK;
}
static inline void context_set_address_width(struct context_entry *context,
unsigned long value)
{
context->hi |= value & 7;
}
static inline void context_set_domain_id(struct context_entry *context,
unsigned long value)
{
context->hi |= (value & ((1 << 16) - 1)) << 8;
}
static inline void context_set_pasid(struct context_entry *context)
{
context->lo |= CONTEXT_PASIDE;
}
static inline int context_domain_id(struct context_entry *c)
{
return((c->hi >> 8) & 0xffff);
}
static inline void context_clear_entry(struct context_entry *context)
{
context->lo = 0;
context->hi = 0;
}
static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
if (!iommu->copied_tables)
return false;
return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}
static inline void
set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}
static inline void
clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}
/*
* This domain is a statically identity mapping domain.
* 1. This domain creats a static 1:1 mapping to all usable memory.
@@ -383,13 +240,12 @@ void free_pgtable_page(void *vaddr)
free_page((unsigned long)vaddr);
}
- static inline int domain_type_is_si(struct dmar_domain *domain)
+ static int domain_type_is_si(struct dmar_domain *domain)
{
return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
}
- static inline int domain_pfn_supported(struct dmar_domain *domain,
- unsigned long pfn)
+ static int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
@@ -451,7 +307,7 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
}
- static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
+ static bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
{
return sm_supported(iommu) ?
ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
@@ -703,7 +559,7 @@ static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
return false;
}
- struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
+ static struct intel_iommu *device_lookup_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
struct pci_dev *pdev = NULL;
@@ -1574,9 +1430,8 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
}
/* Notification for newly created mappings */
- static inline void __mapping_notify_one(struct intel_iommu *iommu,
- struct dmar_domain *domain,
- unsigned long pfn, unsigned int pages)
+ static void __mapping_notify_one(struct intel_iommu *iommu, struct dmar_domain *domain,
+ unsigned long pfn, unsigned int pages)
{
/*
* It's a non-present to present mapping. Only flush if caching mode
@@ -1843,7 +1698,7 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
spin_unlock(&iommu->lock);
}
- static inline int guestwidth_to_adjustwidth(int gaw)
+ static int guestwidth_to_adjustwidth(int gaw)
{
int agaw;
int r = (gaw - 12) % 9;
@@ -1877,7 +1732,7 @@ static void domain_exit(struct dmar_domain *domain)
* Value of X in the PDTS field of a scalable mode context entry
* indicates PASID directory with 2^(X + 7) entries.
*/
- static inline unsigned long context_get_sm_pds(struct pasid_table *table)
+ static unsigned long context_get_sm_pds(struct pasid_table *table)
{
unsigned long pds, max_pde;
@@ -1889,38 +1744,6 @@ static inline unsigned long context_get_sm_pds(struct pasid_table *table)
return pds - 7;
}
/*
* Set the RID_PASID field of a scalable mode context entry. The
* IOMMU hardware will use the PASID value set in this field for
* DMA translations of DMA requests without PASID.
*/
static inline void
context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
{
context->hi |= pasid & ((1 << 20) - 1);
}
/*
* Set the DTE(Device-TLB Enable) field of a scalable mode context
* entry.
*/
static inline void context_set_sm_dte(struct context_entry *context)
{
context->lo |= BIT_ULL(2);
}
/*
* Set the PRE(Page Request Enable) field of a scalable mode context
* entry.
*/
static inline void context_set_sm_pre(struct context_entry *context)
{
context->lo |= BIT_ULL(4);
}
/* Convert value to context PASID directory size field coding. */
#define context_pdts(pds) (((pds) & 0x7) << 9)
static int domain_context_mapping_one(struct dmar_domain *domain,
struct intel_iommu *iommu,
struct pasid_table *table,
@@ -2081,14 +1904,11 @@ static int domain_context_mapping_cb(struct pci_dev *pdev,
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct domain_context_mapping_data data;
+ struct intel_iommu *iommu = info->iommu;
+ u8 bus = info->bus, devfn = info->devfn;
struct pasid_table *table;
- struct intel_iommu *iommu;
- u8 bus, devfn;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
table = intel_pasid_get_table(dev);
@@ -2105,18 +1925,15 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
}
/* Returns a number of VTD pages, but aligned to MM page size */
- static inline unsigned long aligned_nrpages(unsigned long host_addr,
- size_t size)
+ static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
host_addr &= ~PAGE_MASK;
return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
/* Return largest possible superpage level for a given mapping */
- static inline int hardware_largepage_caps(struct dmar_domain *domain,
- unsigned long iov_pfn,
- unsigned long phy_pfn,
- unsigned long pages)
+ static int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn,
+ unsigned long phy_pfn, unsigned long pages)
{
int support, level = 1;
unsigned long pfnmerge;
@@ -2449,15 +2266,10 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu;
+ struct intel_iommu *iommu = info->iommu;
unsigned long flags;
- u8 bus, devfn;
int ret;
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
ret = domain_attach_iommu(domain, iommu);
if (ret)
return ret;
@@ -2470,7 +2282,7 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
/* Setup the PASID entry for requests without PASID: */
if (hw_pass_through && domain_type_is_si(domain))
- ret = intel_pasid_setup_pass_through(iommu, domain,
+ ret = intel_pasid_setup_pass_through(iommu,
dev, IOMMU_NO_PASID);
else if (domain->use_first_level)
ret = domain_setup_first_level(iommu, domain, dev,
@@ -3615,7 +3427,7 @@ void intel_iommu_shutdown(void)
up_write(&dmar_global_lock);
}
- static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev)
+ static struct intel_iommu *dev_to_intel_iommu(struct device *dev)
{
struct iommu_device *iommu_dev = dev_to_iommu_device(dev);
@@ -3694,7 +3506,7 @@ const struct attribute_group *intel_iommu_groups[] = {
NULL,
};
- static inline bool has_external_pci(void)
+ static bool has_external_pci(void)
{
struct pci_dev *pdev = NULL;
@@ -4119,14 +3931,11 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
int prepare_domain_attach_device(struct iommu_domain *domain,
struct device *dev)
{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct intel_iommu *iommu;
+ struct intel_iommu *iommu = info->iommu;
int addr_width;
- iommu = device_to_iommu(dev, NULL, NULL);
- if (!iommu)
- return -ENODEV;
if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
return -EINVAL;
@@ -4403,7 +4212,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
u8 bus, devfn;
int ret;
- iommu = device_to_iommu(dev, &bus, &devfn);
+ iommu = device_lookup_iommu(dev, &bus, &devfn);
if (!iommu || !iommu->iommu.ops)
return ERR_PTR(-ENODEV);
@@ -4461,7 +4270,6 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
- dev_iommu_priv_set(dev, NULL);
kfree(info);
return ERR_PTR(ret);
}
@@ -4479,7 +4287,6 @@ static void intel_iommu_release_device(struct device *dev)
dmar_remove_one_dev_info(dev);
intel_pasid_free_table(dev);
intel_iommu_debugfs_remove_dev(info);
- dev_iommu_priv_set(dev, NULL);
kfree(info);
set_dma_ops(dev, NULL);
}
@@ -4739,8 +4546,9 @@ static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
- struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dev_pasid_info *curr, *dev_pasid = NULL;
+ struct intel_iommu *iommu = info->iommu;
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
unsigned long flags;
@@ -4811,8 +4619,7 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
goto out_free;
if (domain_type_is_si(dmar_domain))
- ret = intel_pasid_setup_pass_through(iommu, dmar_domain,
- dev, pasid);
+ ret = intel_pasid_setup_pass_through(iommu, dev, pasid);
else if (dmar_domain->use_first_level)
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid);


@@ -140,9 +140,6 @@
#define DMAR_ECEO_REG 0x408
#define DMAR_ECRSP_REG 0x410
#define DMAR_ECCAP_REG 0x430
- #define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */
- #define DMAR_VCMD_REG 0xe00 /* Virtual command register */
- #define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */
#define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg)
#define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg)
@@ -854,6 +851,181 @@ static inline bool context_present(struct context_entry *context)
return (context->lo & 1);
}
#define LEVEL_STRIDE (9)
#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
#define MAX_AGAW_WIDTH (64)
#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
static inline int agaw_to_level(int agaw)
{
return agaw + 2;
}
static inline int agaw_to_width(int agaw)
{
return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
}
static inline int width_to_agaw(int width)
{
return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
}
static inline unsigned int level_to_offset_bits(int level)
{
return (level - 1) * LEVEL_STRIDE;
}
static inline int pfn_level_offset(u64 pfn, int level)
{
return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}
static inline u64 level_mask(int level)
{
return -1ULL << level_to_offset_bits(level);
}
static inline u64 level_size(int level)
{
return 1ULL << level_to_offset_bits(level);
}
static inline u64 align_to_level(u64 pfn, int level)
{
return (pfn + level_size(level) - 1) & level_mask(level);
}
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
{
return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
{
return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
return mm_to_dma_pfn_start(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
return page_to_dma_pfn(virt_to_page(p));
}
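The PFN conversions above only matter when the CPU page size is larger than the fixed 4KiB VT-d granule; with 4KiB CPU pages they are the identity. A stand-alone check of the arithmetic, with the page-shift values passed in as assumed examples rather than taken from a real configuration:

/* Illustration of mm_to_dma_pfn_start()/_end(); shifts are example values. */
#include <stdio.h>

#define VTD_PAGE_SHIFT 12	/* VT-d always uses 4KiB granules */

static unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn, int page_shift)
{
	return mm_pfn << (page_shift - VTD_PAGE_SHIFT);
}

static unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn, int page_shift)
{
	return ((mm_pfn + 1) << (page_shift - VTD_PAGE_SHIFT)) - 1;
}

int main(void)
{
	/* 4KiB CPU pages: mapping is 1:1 (10 -> 10..10). */
	printf("4K:  mm pfn 10 -> dma pfn %lu..%lu\n",
	       mm_to_dma_pfn_start(10, 12), mm_to_dma_pfn_end(10, 12));
	/* 64KiB CPU pages: one mm pfn covers 16 VT-d pfns (10 -> 160..175). */
	printf("64K: mm pfn 10 -> dma pfn %lu..%lu\n",
	       mm_to_dma_pfn_start(10, 16), mm_to_dma_pfn_end(10, 16));
	return 0;
}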
static inline void context_set_present(struct context_entry *context)
{
context->lo |= 1;
}
static inline void context_set_fault_enable(struct context_entry *context)
{
context->lo &= (((u64)-1) << 2) | 1;
}
static inline void context_set_translation_type(struct context_entry *context,
unsigned long value)
{
context->lo &= (((u64)-1) << 4) | 3;
context->lo |= (value & 3) << 2;
}
static inline void context_set_address_root(struct context_entry *context,
unsigned long value)
{
context->lo &= ~VTD_PAGE_MASK;
context->lo |= value & VTD_PAGE_MASK;
}
static inline void context_set_address_width(struct context_entry *context,
unsigned long value)
{
context->hi |= value & 7;
}
static inline void context_set_domain_id(struct context_entry *context,
unsigned long value)
{
context->hi |= (value & ((1 << 16) - 1)) << 8;
}
static inline void context_set_pasid(struct context_entry *context)
{
context->lo |= CONTEXT_PASIDE;
}
static inline int context_domain_id(struct context_entry *c)
{
return((c->hi >> 8) & 0xffff);
}
static inline void context_clear_entry(struct context_entry *context)
{
context->lo = 0;
context->hi = 0;
}
#ifdef CONFIG_INTEL_IOMMU
static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
if (!iommu->copied_tables)
return false;
return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}
static inline void
set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}
static inline void
clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
{
clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
}
#endif /* CONFIG_INTEL_IOMMU */
/*
* Set the RID_PASID field of a scalable mode context entry. The
* IOMMU hardware will use the PASID value set in this field for
* DMA translations of DMA requests without PASID.
*/
static inline void
context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
{
context->hi |= pasid & ((1 << 20) - 1);
}
/*
* Set the DTE(Device-TLB Enable) field of a scalable mode context
* entry.
*/
static inline void context_set_sm_dte(struct context_entry *context)
{
context->lo |= BIT_ULL(2);
}
/*
* Set the PRE(Page Request Enable) field of a scalable mode context
* entry.
*/
static inline void context_set_sm_pre(struct context_entry *context)
{
context->lo |= BIT_ULL(4);
}
/* Convert value to context PASID directory size field coding. */
#define context_pdts(pds) (((pds) & 0x7) << 9)
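The PDTS encoding is a log2 scheme: a field value X describes a PASID directory of 2^(X + 7) entries, and context_pdts() places those three bits at bit offset 9 of the context entry. A quick stand-alone check of the arithmetic, with the macro re-declared locally just for the example:

/* Re-declares context_pdts() locally to show the encoding and bit layout. */
#include <stdio.h>

#define context_pdts(pds) (((pds) & 0x7) << 9)

int main(void)
{
	unsigned int pds;

	for (pds = 0; pds <= 2; pds++)
		printf("pds=%u -> %u directory entries, field bits 0x%x\n",
		       pds, 1u << (pds + 7), context_pdts(pds));
	return 0;
}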
struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);
int dmar_enable_qi(struct intel_iommu *iommu);
@@ -900,7 +1072,6 @@ int dmar_ir_support(void);
void *alloc_pgtable_page(int node, gfp_t gfp);
void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
- struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
const struct iommu_user_data *user_data);


@@ -26,63 +26,6 @@
*/
u32 intel_pasid_max_id = PASID_MAX;
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
unsigned long flags;
u8 status_code;
int ret = 0;
u64 res;
raw_spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
!(res & VCMD_VRSP_IP), res);
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
status_code = VCMD_VRSP_SC(res);
switch (status_code) {
case VCMD_VRSP_SC_SUCCESS:
*pasid = VCMD_VRSP_RESULT_PASID(res);
break;
case VCMD_VRSP_SC_NO_PASID_AVAIL:
pr_info("IOMMU: %s: No PASID available\n", iommu->name);
ret = -ENOSPC;
break;
default:
ret = -ENODEV;
pr_warn("IOMMU: %s: Unexpected error code %d\n",
iommu->name, status_code);
}
return ret;
}
void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
{
unsigned long flags;
u8 status_code;
u64 res;
raw_spin_lock_irqsave(&iommu->register_lock, flags);
dmar_writeq(iommu->reg + DMAR_VCMD_REG,
VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
!(res & VCMD_VRSP_IP), res);
raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
status_code = VCMD_VRSP_SC(res);
switch (status_code) {
case VCMD_VRSP_SC_SUCCESS:
break;
case VCMD_VRSP_SC_INVALID_PASID:
pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
break;
default:
pr_warn("IOMMU: %s: Unexpected error code %d\n",
iommu->name, status_code);
}
}
/*
* Per device pasid table management:
*/
@@ -230,30 +173,6 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
/*
* Interfaces for PASID table entry manipulation:
*/
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
WRITE_ONCE(pe->val[0], 0);
WRITE_ONCE(pe->val[1], 0);
WRITE_ONCE(pe->val[2], 0);
WRITE_ONCE(pe->val[3], 0);
WRITE_ONCE(pe->val[4], 0);
WRITE_ONCE(pe->val[5], 0);
WRITE_ONCE(pe->val[6], 0);
WRITE_ONCE(pe->val[7], 0);
}
static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
WRITE_ONCE(pe->val[1], 0);
WRITE_ONCE(pe->val[2], 0);
WRITE_ONCE(pe->val[3], 0);
WRITE_ONCE(pe->val[4], 0);
WRITE_ONCE(pe->val[5], 0);
WRITE_ONCE(pe->val[6], 0);
WRITE_ONCE(pe->val[7], 0);
}
static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
@@ -269,192 +188,6 @@ intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
pasid_clear_entry(pe);
}
static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
u64 old;
old = READ_ONCE(*ptr);
WRITE_ONCE(*ptr, (old & ~mask) | bits);
}
static inline u64 pasid_get_bits(u64 *ptr)
{
return READ_ONCE(*ptr);
}
/*
* Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
* PASID entry.
*/
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}
/*
* Get domain ID value of a scalable mode PASID entry.
*/
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}
/*
* Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
* of a scalable mode PASID entry.
*/
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}
/*
* Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
* entry.
*/
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}
/*
* Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
* of a scalable mode PASID entry.
*/
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}
/*
* Enable fault processing by clearing the FPD(Fault Processing
* Disable) field (Bit 1) of a scalable mode PASID entry.
*/
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[0], 1 << 1, 0);
}
/*
* Enable second level A/D bits by setting the SLADE (Second Level
* Access Dirty Enable) field (Bit 9) of a scalable mode PASID
* entry.
*/
static inline void pasid_set_ssade(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9);
}
/*
* Disable second level A/D bits by clearing the SLADE (Second Level
* Access Dirty Enable) field (Bit 9) of a scalable mode PASID
* entry.
*/
static inline void pasid_clear_ssade(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[0], 1 << 9, 0);
}
/*
* Checks if second level A/D bits specifically the SLADE (Second Level
* Access Dirty Enable) field (Bit 9) of a scalable mode PASID
* entry is set.
*/
static inline bool pasid_get_ssade(struct pasid_entry *pe)
{
return pasid_get_bits(&pe->val[0]) & (1 << 9);
}
/*
* Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
* scalable mode PASID entry.
*/
static inline void pasid_set_sre(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[2], 1 << 0, 1);
}
/*
* Setup the WPE(Write Protect Enable) field (Bit 132) of a
* scalable mode PASID entry.
*/
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}
/*
* Setup the P(Present) field (Bit 0) of a scalable mode PASID
* entry.
*/
static inline void pasid_set_present(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[0], 1 << 0, 1);
}
/*
* Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
* entry.
*/
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}
/*
* Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
* entry. It is required when XD bit of the first level page table
* entry is about to be set.
*/
static inline void pasid_set_nxe(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
}
/*
* Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
* PASID entry.
*/
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}
/*
* Setup the First Level Page table Pointer field (Bit 140~191)
* of a scalable mode PASID entry.
*/
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}
/*
* Setup the First Level Paging Mode field (Bit 130~131) of a
* scalable mode PASID entry.
*/
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}
/*
* Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
* of a scalable mode PASID entry.
*/
static inline void pasid_set_eafe(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
}
static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
u16 did, u32 pasid)
@@ -613,9 +346,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
* Skip top levels of page tables for iommu which has less agaw
* than default. Unnecessary for PT mode.
*/
- static inline int iommu_skip_agaw(struct dmar_domain *domain,
+ static int iommu_skip_agaw(struct dmar_domain *domain,
struct intel_iommu *iommu,
struct dma_pte **pgd)
{
int agaw;
@@ -767,7 +500,6 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
* Set up the scalable mode pasid entry for passthrough translation type.
*/
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
- struct dmar_domain *domain,
struct device *dev, u32 pasid)
{
u16 did = FLPT_DEFAULT_DID;


@@ -22,16 +22,6 @@
#define is_pasid_enabled(entry) (((entry)->lo >> 3) & 0x1)
#define get_pasid_dir_size(entry) (1 << ((((entry)->lo >> 9) & 0x7) + 7))
/* Virtual command interface for enlightened pasid management. */
#define VCMD_CMD_ALLOC 0x1
#define VCMD_CMD_FREE 0x2
#define VCMD_VRSP_IP 0x1
#define VCMD_VRSP_SC(e) (((e) & 0xff) >> 1)
#define VCMD_VRSP_SC_SUCCESS 0
#define VCMD_VRSP_SC_NO_PASID_AVAIL 16
#define VCMD_VRSP_SC_INVALID_PASID 16
#define VCMD_VRSP_RESULT_PASID(e) (((e) >> 16) & 0xfffff)
#define VCMD_CMD_OPERAND(e) ((e) << 16)
/*
* Domain ID reserved for pasid entries programmed for first-level
* only and pass-through transfer modes.
@@ -96,6 +86,216 @@ static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
}
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
WRITE_ONCE(pe->val[0], 0);
WRITE_ONCE(pe->val[1], 0);
WRITE_ONCE(pe->val[2], 0);
WRITE_ONCE(pe->val[3], 0);
WRITE_ONCE(pe->val[4], 0);
WRITE_ONCE(pe->val[5], 0);
WRITE_ONCE(pe->val[6], 0);
WRITE_ONCE(pe->val[7], 0);
}
static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
WRITE_ONCE(pe->val[1], 0);
WRITE_ONCE(pe->val[2], 0);
WRITE_ONCE(pe->val[3], 0);
WRITE_ONCE(pe->val[4], 0);
WRITE_ONCE(pe->val[5], 0);
WRITE_ONCE(pe->val[6], 0);
WRITE_ONCE(pe->val[7], 0);
}
static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
u64 old;
old = READ_ONCE(*ptr);
WRITE_ONCE(*ptr, (old & ~mask) | bits);
}
static inline u64 pasid_get_bits(u64 *ptr)
{
return READ_ONCE(*ptr);
}
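All of the field setters that follow are built on the pasid_set_bits() read-modify-write helper above. A stand-alone illustration of the idiom; READ_ONCE/WRITE_ONCE are dropped and the mask value is written out by hand for the example.

/* User-space sketch of the mask/shift idiom behind pasid_set_domain_id(). */
#include <stdint.h>
#include <stdio.h>

static void set_bits(uint64_t *ptr, uint64_t mask, uint64_t bits)
{
	/* The kernel helper does the same thing via READ_ONCE/WRITE_ONCE. */
	*ptr = (*ptr & ~mask) | bits;
}

int main(void)
{
	uint64_t val1 = 0;		/* stands in for pe->val[1] */
	uint64_t did_mask = 0xffffULL;	/* GENMASK_ULL(15, 0) */

	set_bits(&val1, did_mask, 42);	/* like pasid_set_domain_id(pe, 42) */
	printf("DID field = %llu\n", (unsigned long long)(val1 & did_mask));
	return 0;
}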
/*
* Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
* PASID entry.
*/
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}
/*
* Get domain ID value of a scalable mode PASID entry.
*/
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}
/*
* Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
* of a scalable mode PASID entry.
*/
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}
/*
* Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
* entry.
*/
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}
/*
* Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
* of a scalable mode PASID entry.
*/
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}
/*
* Enable fault processing by clearing the FPD(Fault Processing
* Disable) field (Bit 1) of a scalable mode PASID entry.
*/
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[0], 1 << 1, 0);
}
/*
* Enable second level A/D bits by setting the SLADE (Second Level
* Access Dirty Enable) field (Bit 9) of a scalable mode PASID
* entry.
*/
static inline void pasid_set_ssade(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[0], 1 << 9, 1 << 9);
}
/*
* Disable second level A/D bits by clearing the SLADE (Second Level
* Access Dirty Enable) field (Bit 9) of a scalable mode PASID
* entry.
*/
static inline void pasid_clear_ssade(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[0], 1 << 9, 0);
}
/*
* Checks if second level A/D bits specifically the SLADE (Second Level
* Access Dirty Enable) field (Bit 9) of a scalable mode PASID
* entry is set.
*/
static inline bool pasid_get_ssade(struct pasid_entry *pe)
{
return pasid_get_bits(&pe->val[0]) & (1 << 9);
}
/*
* Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
* scalable mode PASID entry.
*/
static inline void pasid_set_sre(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[2], 1 << 0, 1);
}
/*
* Setup the WPE(Write Protect Enable) field (Bit 132) of a
* scalable mode PASID entry.
*/
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}
/*
* Setup the P(Present) field (Bit 0) of a scalable mode PASID
* entry.
*/
static inline void pasid_set_present(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[0], 1 << 0, 1);
}
/*
* Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
* entry.
*/
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}
/*
* Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
* entry. It is required when XD bit of the first level page table
* entry is about to be set.
*/
static inline void pasid_set_nxe(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
}
/*
* Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
* PASID entry.
*/
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}
/*
* Setup the First Level Page table Pointer field (Bit 140~191)
* of a scalable mode PASID entry.
*/
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}
/*
* Setup the First Level Paging Mode field (Bit 130~131) of a
* scalable mode PASID entry.
*/
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}
/*
* Setup the Extended Access Flag Enable (EAFE) field (Bit 135)
* of a scalable mode PASID entry.
*/
static inline void pasid_set_eafe(struct pasid_entry *pe)
{
pasid_set_bits(&pe->val[2], 1 << 7, 1 << 7);
}
extern unsigned int intel_pasid_max_id;
int intel_pasid_alloc_table(struct device *dev);
void intel_pasid_free_table(struct device *dev);
@@ -111,15 +311,12 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
struct device *dev, u32 pasid,
bool enabled);
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid);
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
u32 pasid, struct dmar_domain *domain);
void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
struct device *dev, u32 pasid,
bool fault_ignore);
int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid);
void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid);
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
struct device *dev, u32 pasid);
#endif /* __INTEL_PASID_H */
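
To illustrate how the accessors moved into this header compose (a minimal sketch only, not code from this series; the function name and field values below are placeholders, and locking, caching-mode checks and cache/IOTLB flushing are omitted), a pass-through style scalable-mode entry is built by clearing the entry and then ORing in the individual fields before setting the present bit last:

        /* Illustrative only: build a pass-through PASID entry. */
        static void example_setup_pt_entry(struct pasid_entry *pte, u16 did)
        {
                pasid_clear_entry(pte);             /* start from all-zero qwords */
                pasid_set_domain_id(pte, did);      /* DID, bits 64-79 */
                pasid_set_address_width(pte, 2);    /* AW: placeholder (e.g. 4-level) */
                pasid_set_translation_type(pte, 4); /* PGTT: pass-through encoding */
                pasid_set_fault_enable(pte);        /* keep FPD clear */
                pasid_set_present(pte);             /* mark the entry valid last */
        }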


@@ -316,21 +316,22 @@ static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
}
static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
struct mm_struct *mm)
struct iommu_domain *domain, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct mm_struct *mm = domain->mm;
struct intel_svm_dev *sdev;
struct intel_svm *svm;
unsigned long sflags;
int ret = 0;
svm = pasid_private_find(mm->pasid);
svm = pasid_private_find(pasid);
if (!svm) {
svm = kzalloc(sizeof(*svm), GFP_KERNEL);
if (!svm)
return -ENOMEM;
svm->pasid = mm->pasid;
svm->pasid = pasid;
svm->mm = mm;
INIT_LIST_HEAD_RCU(&svm->devs);
@@ -368,7 +369,7 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
/* Setup the pasid table: */
sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, mm->pasid,
ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid,
FLPT_DEFAULT_DID, sflags);
if (ret)
goto free_sdev;
@@ -382,7 +383,7 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
free_svm:
if (list_empty(&svm->devs)) {
mmu_notifier_unregister(&svm->notifier, mm);
pasid_private_remove(mm->pasid);
pasid_private_remove(pasid);
kfree(svm);
}
@@ -392,14 +393,9 @@ static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
{
struct intel_svm_dev *sdev;
struct intel_iommu *iommu;
struct intel_svm *svm;
struct mm_struct *mm;
iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
return;
if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
return;
mm = svm->mm;
@@ -750,25 +746,16 @@ int intel_svm_page_response(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
u8 bus = info->bus, devfn = info->devfn;
struct iommu_fault_page_request *prm;
struct intel_iommu *iommu;
bool private_present;
bool pasid_present;
bool last_page;
u8 bus, devfn;
int ret = 0;
u16 sid;
if (!dev || !dev_is_pci(dev))
return -ENODEV;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
if (!msg || !evt)
return -EINVAL;
prm = &evt->fault.prm;
sid = PCI_DEVID(bus, devfn);
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
@@ -822,9 +809,8 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct mm_struct *mm = domain->mm;
return intel_svm_bind_mm(iommu, dev, mm);
return intel_svm_bind_mm(iommu, dev, domain, pasid);
}
static void intel_svm_domain_free(struct iommu_domain *domain)


@@ -188,20 +188,28 @@ static dma_addr_t __arm_lpae_dma_addr(void *pages)
}
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
struct io_pgtable_cfg *cfg)
struct io_pgtable_cfg *cfg,
void *cookie)
{
struct device *dev = cfg->iommu_dev;
int order = get_order(size);
struct page *p;
dma_addr_t dma;
void *pages;
VM_BUG_ON((gfp & __GFP_HIGHMEM));
p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
if (!p)
if (cfg->alloc) {
pages = cfg->alloc(cookie, size, gfp);
} else {
struct page *p;
p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
pages = p ? page_address(p) : NULL;
}
if (!pages)
return NULL;
pages = page_address(p);
if (!cfg->coherent_walk) {
dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
@@ -220,18 +228,28 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
out_unmap:
dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
__free_pages(p, order);
if (cfg->free)
cfg->free(cookie, pages, size);
else
free_pages((unsigned long)pages, order);
return NULL;
}
static void __arm_lpae_free_pages(void *pages, size_t size,
struct io_pgtable_cfg *cfg)
struct io_pgtable_cfg *cfg,
void *cookie)
{
if (!cfg->coherent_walk)
dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
size, DMA_TO_DEVICE);
free_pages((unsigned long)pages, get_order(size));
if (cfg->free)
cfg->free(cookie, pages, size);
else
free_pages((unsigned long)pages, get_order(size));
}
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
@@ -373,13 +391,13 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
/* Grab a pointer to the next level */
pte = READ_ONCE(*ptep);
if (!pte) {
cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
if (!cptep)
return -ENOMEM;
pte = arm_lpae_install_table(cptep, ptep, 0, data);
if (pte)
__arm_lpae_free_pages(cptep, tblsz, cfg);
__arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
__arm_lpae_sync_pte(ptep, 1, cfg);
}
@@ -524,7 +542,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
}
__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
__arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
}
static void arm_lpae_free_pgtable(struct io_pgtable *iop)
@@ -552,7 +570,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
return 0;
tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie);
if (!tablep)
return 0; /* Bytes unmapped */
@@ -575,7 +593,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
if (pte != blk_pte) {
__arm_lpae_free_pages(tablep, tablesz, cfg);
__arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie);
/*
* We may race against someone unmapping another part of this
* block, but anything else is invalid. We can't misinterpret
@@ -882,7 +900,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
/* Looking good; allocate a pgd */
data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
GFP_KERNEL, cfg);
GFP_KERNEL, cfg, cookie);
if (!data->pgd)
goto out_free_data;
@@ -984,7 +1002,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
/* Allocate pgd pages */
data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
GFP_KERNEL, cfg);
GFP_KERNEL, cfg, cookie);
if (!data->pgd)
goto out_free_data;
@@ -1059,7 +1077,7 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
cfg);
cfg, cookie);
if (!data->pgd)
goto out_free_data;
@@ -1080,26 +1098,31 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
.caps = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
.alloc = arm_64_lpae_alloc_pgtable_s1,
.free = arm_lpae_free_pgtable,
};
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
.caps = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
.alloc = arm_64_lpae_alloc_pgtable_s2,
.free = arm_lpae_free_pgtable,
};
struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
.caps = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
.alloc = arm_32_lpae_alloc_pgtable_s1,
.free = arm_lpae_free_pgtable,
};
struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
.caps = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
.alloc = arm_32_lpae_alloc_pgtable_s2,
.free = arm_lpae_free_pgtable,
};
struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
.caps = IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
.alloc = arm_mali_lpae_alloc_pgtable,
.free = arm_lpae_free_pgtable,
};


@@ -34,6 +34,26 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
#endif
};
static int check_custom_allocator(enum io_pgtable_fmt fmt,
struct io_pgtable_cfg *cfg)
{
/* No custom allocator, no need to check the format. */
if (!cfg->alloc && !cfg->free)
return 0;
/* When passing a custom allocator, both the alloc and free
* functions should be provided.
*/
if (!cfg->alloc || !cfg->free)
return -EINVAL;
/* Make sure the format supports custom allocators. */
if (io_pgtable_init_table[fmt]->caps & IO_PGTABLE_CAP_CUSTOM_ALLOCATOR)
return 0;
return -EINVAL;
}
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
struct io_pgtable_cfg *cfg,
void *cookie)
@@ -44,6 +64,9 @@ struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
if (fmt >= IO_PGTABLE_NUM_FMTS)
return NULL;
if (check_custom_allocator(fmt, cfg))
return NULL;
fns = io_pgtable_init_table[fmt];
if (!fns)
return NULL;
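
As an illustration of the new hooks (not part of this series; my_pgtable_alloc(), my_pgtable_free(), my_flush_ops and struct my_iommu_domain are hypothetical), a driver that wants to carve page tables out of its own pool passes both callbacks, since check_custom_allocator() rejects a configuration where only one of them is set. The memory returned must be zeroed and usable with virt_to_phys()/dma_map_single(), as documented for io_pgtable_cfg:

        static void *my_pgtable_alloc(void *cookie, size_t size, gfp_t gfp)
        {
                /* cookie is whatever was handed to alloc_io_pgtable_ops() */
                return (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
        }

        static void my_pgtable_free(void *cookie, void *pages, size_t size)
        {
                free_pages((unsigned long)pages, get_order(size));
        }

        static int my_iommu_init_pgtable(struct my_iommu_domain *dom)
        {
                struct io_pgtable_cfg cfg = {
                        .pgsize_bitmap  = SZ_4K | SZ_2M | SZ_1G,
                        .ias            = 48,
                        .oas            = 48,
                        .coherent_walk  = true,
                        .tlb            = &my_flush_ops,
                        .iommu_dev      = dom->dev,
                        .alloc          = my_pgtable_alloc, /* both set, or neither */
                        .free           = my_pgtable_free,
                };

                dom->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, dom);
                if (!dom->pgtbl_ops)
                        return -EINVAL; /* e.g. format without IO_PGTABLE_CAP_CUSTOM_ALLOCATOR */
                return 0;
        }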


@@ -12,32 +12,42 @@
static DEFINE_MUTEX(iommu_sva_lock);
/* Allocate a PASID for the mm within range (inclusive) */
static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
struct iommu_mm_data *iommu_mm;
ioasid_t pasid;
int ret = 0;
lockdep_assert_held(&iommu_sva_lock);
if (!arch_pgtable_dma_compat(mm))
return -EBUSY;
mutex_lock(&iommu_sva_lock);
return ERR_PTR(-EBUSY);
iommu_mm = mm->iommu_mm;
/* Is a PASID already associated with this mm? */
if (mm_valid_pasid(mm)) {
if (mm->pasid >= dev->iommu->max_pasids)
ret = -EOVERFLOW;
goto out;
if (iommu_mm) {
if (iommu_mm->pasid >= dev->iommu->max_pasids)
return ERR_PTR(-EOVERFLOW);
return iommu_mm;
}
iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
if (!iommu_mm)
return ERR_PTR(-ENOMEM);
pasid = iommu_alloc_global_pasid(dev);
if (pasid == IOMMU_PASID_INVALID) {
ret = -ENOSPC;
goto out;
kfree(iommu_mm);
return ERR_PTR(-ENOSPC);
}
mm->pasid = pasid;
ret = 0;
out:
mutex_unlock(&iommu_sva_lock);
return ret;
iommu_mm->pasid = pasid;
INIT_LIST_HEAD(&iommu_mm->sva_domains);
/*
* Make sure the write to mm->iommu_mm is not reordered in front of
* initialization to iommu_mm fields. If it does, readers may see a
* valid iommu_mm with uninitialized values.
*/
smp_store_release(&mm->iommu_mm, iommu_mm);
return iommu_mm;
}
/**
@@ -58,57 +68,60 @@ static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
*/
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
struct iommu_mm_data *iommu_mm;
struct iommu_domain *domain;
struct iommu_sva *handle;
int ret;
/* Allocate mm->pasid if necessary. */
ret = iommu_sva_alloc_pasid(mm, dev);
if (ret)
return ERR_PTR(ret);
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return ERR_PTR(-ENOMEM);
mutex_lock(&iommu_sva_lock);
/* Search for an existing domain. */
domain = iommu_get_domain_for_dev_pasid(dev, mm->pasid,
IOMMU_DOMAIN_SVA);
if (IS_ERR(domain)) {
ret = PTR_ERR(domain);
/* Allocate mm->pasid if necessary. */
iommu_mm = iommu_alloc_mm_data(mm, dev);
if (IS_ERR(iommu_mm)) {
ret = PTR_ERR(iommu_mm);
goto out_unlock;
}
if (domain) {
domain->users++;
goto out;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle) {
ret = -ENOMEM;
goto out_unlock;
}
/* Search for an existing domain. */
list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
if (!ret) {
domain->users++;
goto out;
}
}
/* Allocate a new domain and set it on device pasid. */
domain = iommu_sva_domain_alloc(dev, mm);
if (!domain) {
ret = -ENOMEM;
goto out_unlock;
goto out_free_handle;
}
ret = iommu_attach_device_pasid(domain, dev, mm->pasid);
ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
if (ret)
goto out_free_domain;
domain->users = 1;
list_add(&domain->next, &mm->iommu_mm->sva_domains);
out:
mutex_unlock(&iommu_sva_lock);
handle->dev = dev;
handle->domain = domain;
return handle;
out_free_domain:
iommu_domain_free(domain);
out_free_handle:
kfree(handle);
out_unlock:
mutex_unlock(&iommu_sva_lock);
kfree(handle);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
@@ -124,12 +137,13 @@ EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
struct iommu_domain *domain = handle->domain;
ioasid_t pasid = domain->mm->pasid;
struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
struct device *dev = handle->dev;
mutex_lock(&iommu_sva_lock);
iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
if (--domain->users == 0) {
iommu_detach_device_pasid(domain, dev, pasid);
list_del(&domain->next);
iommu_domain_free(domain);
}
mutex_unlock(&iommu_sva_lock);
@@ -141,7 +155,7 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
struct iommu_domain *domain = handle->domain;
return domain->mm->pasid;
return mm_get_enqcmd_pasid(domain->mm);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
@@ -205,8 +219,11 @@ iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
void mm_pasid_drop(struct mm_struct *mm)
{
if (likely(!mm_valid_pasid(mm)))
struct iommu_mm_data *iommu_mm = mm->iommu_mm;
if (!iommu_mm)
return;
iommu_free_global_pasid(mm->pasid);
iommu_free_global_pasid(iommu_mm->pasid);
kfree(iommu_mm);
}
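
For context, the consumer-facing flow is unchanged. Roughly (a sketch with error handling trimmed; the device and mm pointers are assumed to come from the caller), a driver binds an mm, reads back the PASID to program into its hardware, and later drops the bond; the PASID and SVA domains are now shared through mm->iommu_mm across all devices bound to that mm:

        /* Sketch of a driver using the SVA API; 'mm' is typically current->mm. */
        static int my_driver_enable_sva(struct device *dev, struct mm_struct *mm)
        {
                struct iommu_sva *handle;
                u32 pasid;

                handle = iommu_sva_bind_device(dev, mm);
                if (IS_ERR(handle))
                        return PTR_ERR(handle);

                pasid = iommu_sva_get_pasid(handle); /* backed by mm->iommu_mm->pasid */
                if (pasid == IOMMU_PASID_INVALID) {
                        iommu_sva_unbind_device(handle);
                        return -ENODEV;
                }

                /* ... program 'pasid' into device contexts, keep 'handle' around ... */
                return 0;
        }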


@@ -148,7 +148,7 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);
static struct bus_type * const iommu_buses[] = {
static const struct bus_type * const iommu_buses[] = {
&platform_bus_type,
#ifdef CONFIG_PCI
&pci_bus_type,
@@ -257,13 +257,6 @@ int iommu_device_register(struct iommu_device *iommu,
/* We need to be able to take module references appropriately */
if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
return -EINVAL;
/*
* Temporarily enforce global restriction to a single driver. This was
* already the de-facto behaviour, since any possible combination of
* existing drivers would compete for at least the PCI or platform bus.
*/
if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
return -EBUSY;
iommu->ops = ops;
if (hwdev)
@@ -273,10 +266,8 @@ int iommu_device_register(struct iommu_device *iommu,
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++)
iommu_buses[i]->iommu_ops = ops;
err = bus_iommu_probe(iommu_buses[i]);
}
if (err)
iommu_device_unregister(iommu);
return err;
@@ -329,7 +320,6 @@ int iommu_device_register_bus(struct iommu_device *iommu,
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
bus->iommu_ops = ops;
err = bus_iommu_probe(bus);
if (err) {
iommu_device_unregister_bus(iommu, bus, nb);
@@ -344,6 +334,8 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
{
struct dev_iommu *param = dev->iommu;
lockdep_assert_held(&iommu_probe_device_lock);
if (param)
return param;
@@ -368,6 +360,15 @@ static void dev_iommu_free(struct device *dev)
kfree(param);
}
/*
* Internal equivalent of device_iommu_mapped() for when we care that a device
* actually has API ops, and don't want false positives from VFIO-only groups.
*/
static bool dev_has_iommu(struct device *dev)
{
return dev->iommu && dev->iommu->iommu_dev;
}
static u32 dev_iommu_get_max_pasids(struct device *dev)
{
u32 max_pasids = 0, bits = 0;
@@ -386,6 +387,15 @@ static u32 dev_iommu_get_max_pasids(struct device *dev)
return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}
void dev_iommu_priv_set(struct device *dev, void *priv)
{
/* FSL_PAMU does something weird */
if (!IS_ENABLED(CONFIG_FSL_PAMU))
lockdep_assert_held(&iommu_probe_device_lock);
dev->iommu->priv = priv;
}
EXPORT_SYMBOL_GPL(dev_iommu_priv_set);
/*
* Init the dev->iommu and dev->iommu_group in the struct device and get the
* driver probed
@@ -489,11 +499,26 @@ DEFINE_MUTEX(iommu_probe_device_lock);
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
const struct iommu_ops *ops;
struct iommu_fwspec *fwspec;
struct iommu_group *group;
struct group_device *gdev;
int ret;
/*
* For FDT-based systems and ACPI IORT/VIOT, drivers register IOMMU
* instances with non-NULL fwnodes, and client devices should have been
* identified with a fwspec by this point. Otherwise, we can currently
* assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can
* be present, and that any of their registered instances has suitable
* ops for probing, and thus cheekily co-opt the same mechanism.
*/
fwspec = dev_iommu_fwspec_get(dev);
if (fwspec && fwspec->ops)
ops = fwspec->ops;
else
ops = iommu_ops_from_fwnode(NULL);
if (!ops)
return -ENODEV;
/*
@@ -618,7 +643,7 @@ static void __iommu_group_remove_device(struct device *dev)
list_del(&device->list);
__iommu_group_free_device(group, device);
if (dev->iommu && dev->iommu->iommu_dev)
if (dev_has_iommu(dev))
iommu_deinit_device(dev);
else
dev->iommu_group = NULL;
@@ -817,7 +842,7 @@ int iommu_get_group_resv_regions(struct iommu_group *group,
* Non-API groups still expose reserved_regions in sysfs,
* so filter out calls that get here that way.
*/
if (!device->dev->iommu)
if (!dev_has_iommu(device->dev))
break;
INIT_LIST_HEAD(&dev_resv_regions);
@@ -1223,6 +1248,12 @@ void iommu_group_remove_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
static struct device *iommu_group_first_dev(struct iommu_group *group)
{
lockdep_assert_held(&group->mutex);
return list_first_entry(&group->devices, struct group_device, list)->dev;
}
/**
* iommu_group_for_each_dev - iterate over each device in the group
* @group: the group
@@ -1750,23 +1781,6 @@ __iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
return __iommu_group_domain_alloc(group, req_type);
}
/*
* Returns the iommu_ops for the devices in an iommu group.
*
* It is assumed that all devices in an iommu group are managed by a single
* IOMMU unit. Therefore, this returns the dev_iommu_ops of the first device
* in the group.
*/
static const struct iommu_ops *group_iommu_ops(struct iommu_group *group)
{
struct group_device *device =
list_first_entry(&group->devices, struct group_device, list);
lockdep_assert_held(&group->mutex);
return dev_iommu_ops(device->dev);
}
/*
* req_type of 0 means "auto" which means to select a domain based on
* iommu_def_domain_type or what the driver actually supports.
@@ -1774,7 +1788,7 @@ static const struct iommu_ops *group_iommu_ops(struct iommu_group *group)
static struct iommu_domain *
iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
{
const struct iommu_ops *ops = group_iommu_ops(group);
const struct iommu_ops *ops = dev_iommu_ops(iommu_group_first_dev(group));
struct iommu_domain *dom;
lockdep_assert_held(&group->mutex);
@@ -1854,7 +1868,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
static int iommu_get_def_domain_type(struct iommu_group *group,
struct device *dev, int cur_type)
{
const struct iommu_ops *ops = group_iommu_ops(group);
const struct iommu_ops *ops = dev_iommu_ops(dev);
int type;
if (!ops->def_domain_type)
@@ -2003,9 +2017,28 @@ int bus_iommu_probe(const struct bus_type *bus)
return 0;
}
/**
* iommu_present() - make platform-specific assumptions about an IOMMU
* @bus: bus to check
*
* Do not use this function. You want device_iommu_mapped() instead.
*
* Return: true if some IOMMU is present and aware of devices on the given bus;
* in general it may not be the only IOMMU, and it may not have anything to do
* with whatever device you are ultimately interested in.
*/
bool iommu_present(const struct bus_type *bus)
{
return bus->iommu_ops != NULL;
bool ret = false;
for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
if (iommu_buses[i] == bus) {
spin_lock(&iommu_device_lock);
ret = !list_empty(&iommu_device_list);
spin_unlock(&iommu_device_lock);
}
}
return ret;
}
EXPORT_SYMBOL_GPL(iommu_present);
@@ -2021,7 +2054,7 @@ bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
const struct iommu_ops *ops;
if (!dev->iommu || !dev->iommu->iommu_dev)
if (!dev_has_iommu(dev))
return false;
ops = dev_iommu_ops(dev);
@@ -2107,6 +2140,7 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
return ERR_PTR(-ENOMEM);
domain->type = type;
domain->owner = ops;
/*
* If not already set, assume all sizes by default; the driver
* may override this later
@@ -2132,21 +2166,37 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
static struct iommu_domain *
__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
{
struct device *dev =
list_first_entry(&group->devices, struct group_device, list)
->dev;
return __iommu_domain_alloc(group_iommu_ops(group), dev, type);
struct device *dev = iommu_group_first_dev(group);
return __iommu_domain_alloc(dev_iommu_ops(dev), dev, type);
}
static int __iommu_domain_alloc_dev(struct device *dev, void *data)
{
const struct iommu_ops **ops = data;
if (!dev_has_iommu(dev))
return 0;
if (WARN_ONCE(*ops && *ops != dev_iommu_ops(dev),
"Multiple IOMMU drivers present for bus %s, which the public IOMMU API can't fully support yet. You will still need to disable one or more for this to work, sorry!\n",
dev_bus_name(dev)))
return -EBUSY;
*ops = dev_iommu_ops(dev);
return 0;
} }
struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
const struct iommu_ops *ops = NULL;
int err = bus_for_each_dev(bus, NULL, &ops, __iommu_domain_alloc_dev);
struct iommu_domain *domain;
if (bus == NULL || bus->iommu_ops == NULL)
if (err || !ops)
return NULL;
domain = __iommu_domain_alloc(bus->iommu_ops, NULL,
IOMMU_DOMAIN_UNMANAGED);
domain = __iommu_domain_alloc(ops, NULL, IOMMU_DOMAIN_UNMANAGED);
if (IS_ERR(domain))
return NULL;
return domain;
@@ -2284,10 +2334,16 @@ struct iommu_domain *iommu_get_dma_domain(struct device *dev)
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group)
{
struct device *dev;
if (group->domain && group->domain != group->default_domain &&
group->domain != group->blocking_domain)
return -EBUSY;
dev = iommu_group_first_dev(group);
if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
return -EINVAL;
return __iommu_group_set_domain(group, domain);
}
@@ -3004,8 +3060,8 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
*/
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
if (dev->iommu && dev->iommu->iommu_dev) {
const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
if (dev_has_iommu(dev)) {
const struct iommu_ops *ops = dev_iommu_ops(dev);
if (ops->dev_enable_feat)
return ops->dev_enable_feat(dev, feat);
@@ -3020,8 +3076,8 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
*/
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
if (dev->iommu && dev->iommu->iommu_dev) {
const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
if (dev_has_iommu(dev)) {
const struct iommu_ops *ops = dev_iommu_ops(dev);
if (ops->dev_disable_feat)
return ops->dev_disable_feat(dev, feat);
@@ -3481,6 +3537,9 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
if (!group)
return -ENODEV;
if (!dev_has_iommu(dev) || dev_iommu_ops(dev) != domain->owner)
return -EINVAL;
mutex_lock(&group->mutex);
curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
if (curr) {
@@ -3569,6 +3628,7 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
domain->type = IOMMU_DOMAIN_SVA;
mmgrab(mm);
domain->mm = mm;
domain->owner = ops;
domain->iopf_handler = iommu_sva_handle_iopf;
domain->fault_data = mm;
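
One practical consequence of recording domain->owner, shown as a sketch rather than code from this series (dev_a and dev_b are placeholders): a paging domain allocated through one device's driver can no longer be attached to a device managed by a different IOMMU driver, because the new checks in __iommu_attach_group() and iommu_attach_device_pasid() return -EINVAL instead of letting the wrong driver interpret the domain.

        static int example_cross_attach(struct device *dev_a, struct device *dev_b)
        {
                struct iommu_domain *dom;
                int ret;

                dom = iommu_domain_alloc(dev_a->bus); /* owner = dev_a's IOMMU driver */
                if (!dom)
                        return -ENOMEM;

                /* fails with -EINVAL if dev_b is handled by a different driver */
                ret = iommu_attach_device(dom, dev_b);
                if (ret)
                        iommu_domain_free(dom);
                return ret;
        }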


@@ -135,6 +135,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
hwpt->domain = NULL;
goto out_abort;
}
hwpt->domain->owner = ops;
} else {
hwpt->domain = iommu_domain_alloc(idev->dev->bus);
if (!hwpt->domain) {
@@ -233,6 +234,7 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
hwpt->domain = NULL;
goto out_abort;
}
hwpt->domain->owner = ops;
if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
rc = -EINVAL;


@@ -863,16 +863,11 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct mtk_iommu_data *data;
struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
struct device_link *link;
struct device *larbdev;
unsigned int larbid, larbidx, i;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return ERR_PTR(-ENODEV); /* Not a iommu client device */
data = dev_iommu_priv_get(dev);
if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
return &data->iommu;


@@ -481,9 +481,6 @@ static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
idx++;
}
if (!fwspec || fwspec->ops != &mtk_iommu_v1_ops)
return ERR_PTR(-ENODEV); /* Not a iommu client device */
data = dev_iommu_priv_get(dev);
/* Link the consumer device with the smi-larb device(supplier) */


@@ -17,8 +17,6 @@
#include <linux/slab.h>
#include <linux/fsl/mc.h>
#define NO_IOMMU 1
static int of_iommu_xlate(struct device *dev,
struct of_phandle_args *iommu_spec)
{
@@ -29,7 +27,7 @@ static int of_iommu_xlate(struct device *dev,
ops = iommu_ops_from_fwnode(fwnode);
if ((ops && !ops->of_xlate) ||
!of_device_is_available(iommu_spec->np))
return NO_IOMMU;
return -ENODEV;
ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops);
if (ret)
@@ -61,7 +59,7 @@ static int of_iommu_configure_dev_id(struct device_node *master_np,
"iommu-map-mask", &iommu_spec.np,
iommu_spec.args);
if (err)
return err == -ENODEV ? NO_IOMMU : err;
return err;
err = of_iommu_xlate(dev, &iommu_spec);
of_node_put(iommu_spec.np);
@@ -72,7 +70,7 @@ static int of_iommu_configure_dev(struct device_node *master_np,
struct device *dev)
{
struct of_phandle_args iommu_spec;
int err = NO_IOMMU, idx = 0;
int err = -ENODEV, idx = 0;
while (!of_parse_phandle_with_args(master_np, "iommus",
"#iommu-cells",
@@ -107,16 +105,21 @@ static int of_iommu_configure_device(struct device_node *master_np,
of_iommu_configure_dev(master_np, dev);
}
const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np,
const u32 *id)
/*
* Returns:
* 0 on success, an iommu was configured
* -ENODEV if the device does not have any IOMMU
* -EPROBEDEFER if probing should be tried again
* -errno fatal errors
*/
int of_iommu_configure(struct device *dev, struct device_node *master_np,
const u32 *id)
{
const struct iommu_ops *ops = NULL;
struct iommu_fwspec *fwspec;
int err = NO_IOMMU;
int err;
if (!master_np)
return NULL;
return -ENODEV;
/* Serialise to make dev->iommu stable under our potential fwspec */
mutex_lock(&iommu_probe_device_lock);
@@ -124,7 +127,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
if (fwspec) {
if (fwspec->ops) {
mutex_unlock(&iommu_probe_device_lock);
return fwspec->ops;
return 0;
}
/* In the deferred case, start again from scratch */
iommu_fwspec_free(dev);
@@ -147,36 +150,21 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
} else {
err = of_iommu_configure_device(master_np, dev, id);
}
/*
* Two success conditions can be represented by non-negative err here:
* >0 : there is no IOMMU, or one was unavailable for non-fatal reasons
* 0 : we found an IOMMU, and dev->fwspec is initialised appropriately
* <0 : any actual error
*/
if (!err) {
/* The fwspec pointer changed, read it again */
fwspec = dev_iommu_fwspec_get(dev);
ops = fwspec->ops;
}
mutex_unlock(&iommu_probe_device_lock);
/*
* If we have reason to believe the IOMMU driver missed the initial
* probe for dev, replay it to get things in order.
*/
if (!err && dev->bus)
err = iommu_probe_device(dev);
/* Ignore all other errors apart from EPROBE_DEFER */
if (err == -EPROBE_DEFER) {
ops = ERR_PTR(err);
} else if (err < 0) {
dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
ops = NULL;
}
return ops;
if (err == -ENODEV || err == -EPROBE_DEFER)
return err;
if (err)
goto err_log;
err = iommu_probe_device(dev);
if (err)
goto err_log;
return 0;
err_log:
dev_dbg(dev, "Adding to IOMMU failed: %pe\n", ERR_PTR(err));
return err;
}
static enum iommu_resv_type __maybe_unused
@@ -260,7 +248,14 @@ void of_iommu_get_resv_regions(struct device *dev, struct list_head *list)
phys_addr_t iova;
size_t length;
if (of_dma_is_coherent(dev->of_node))
prot |= IOMMU_CACHE;
maps = of_translate_dma_region(np, maps, &iova, &length);
if (length == 0) {
dev_warn(dev, "Cannot reserve IOVA region of 0 size\n");
continue;
}
type = iommu_resv_region_get_type(dev, &phys, iova, length);
region = iommu_alloc_resv_region(iova, length, prot, type,


@@ -1719,7 +1719,6 @@ static void omap_iommu_release_device(struct device *dev)
if (!dev->of_node || !arch_data)
return;
dev_iommu_priv_set(dev, NULL);
kfree(arch_data);
}


@@ -385,13 +385,7 @@ static phys_addr_t sprd_iommu_iova_to_phys(struct iommu_domain *domain,
static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct sprd_iommu_device *sdev;
if (!fwspec || fwspec->ops != &sprd_iommu_ops)
return ERR_PTR(-ENODEV);
sdev = dev_iommu_priv_get(dev);
struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
return &sdev->iommu;
}


@@ -843,7 +843,7 @@ static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
.flags = cpu_to_le32(flags),
};
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
ret = viommu_add_req(vdomain->viommu, &map, sizeof(map));
if (ret) {
viommu_del_mappings(vdomain, iova, end);
return ret;
@@ -912,6 +912,33 @@ static void viommu_iotlb_sync(struct iommu_domain *domain,
viommu_sync_req(vdomain->viommu);
}
static int viommu_iotlb_sync_map(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
struct viommu_domain *vdomain = to_viommu_domain(domain);
/*
* May be called before the viommu is initialized including
* while creating direct mapping
*/
if (!vdomain->nr_endpoints)
return 0;
return viommu_sync_req(vdomain->viommu);
}
static void viommu_flush_iotlb_all(struct iommu_domain *domain)
{
struct viommu_domain *vdomain = to_viommu_domain(domain);
/*
* May be called before the viommu is initialized including
* while creating direct mapping
*/
if (!vdomain->nr_endpoints)
return;
viommu_sync_req(vdomain->viommu);
}
static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
struct iommu_resv_region *entry, *new_entry, *msi = NULL;
@@ -969,9 +996,6 @@ static struct iommu_device *viommu_probe_device(struct device *dev)
struct viommu_dev *viommu = NULL;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
if (!fwspec || fwspec->ops != &viommu_ops)
return ERR_PTR(-ENODEV);
viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
if (!viommu)
return ERR_PTR(-ENODEV);
@@ -1037,6 +1061,8 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
case IOMMU_CAP_DEFERRED_FLUSH:
return true;
default:
return false;
}
@@ -1057,7 +1083,9 @@ static struct iommu_ops viommu_ops = {
.map_pages = viommu_map_pages,
.unmap_pages = viommu_unmap_pages,
.iova_to_phys = viommu_iova_to_phys,
.flush_iotlb_all = viommu_flush_iotlb_all,
.iotlb_sync = viommu_iotlb_sync,
.iotlb_sync_map = viommu_iotlb_sync_map,
.free = viommu_domain_free,
}
};
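
The reason viommu_map_pages() can switch from a synchronous request to viommu_add_req() is the calling contract around iotlb_sync_map(): the core guarantees a sync call after the map before DMA may start. Roughly (a simplified sketch of that contract, not code from this series; the function name is hypothetical):

        /* Sketch: map_pages() may only queue requests because the core
         * calls iotlb_sync_map() before the mapping must be visible. */
        static int example_map_then_sync(struct iommu_domain *domain,
                                         unsigned long iova, phys_addr_t paddr,
                                         size_t size, int prot)
        {
                const struct iommu_domain_ops *ops = domain->ops;
                size_t mapped = 0;
                int ret;

                ret = ops->map_pages(domain, iova, paddr, size, 1, prot,
                                     GFP_KERNEL, &mapped);
                if (!ret && ops->iotlb_sync_map)
                        ret = ops->iotlb_sync_map(domain, iova, mapped);
                return ret;
        }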


@@ -114,9 +114,12 @@ static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct of_phandle_args args;
unsigned int i, index = 0;
u32 sid;
if (!tegra_dev_iommu_get_stream_id(dev, &sid))
return 0;
while (!of_parse_phandle_with_args(dev->of_node, "interconnects", "#interconnect-cells",
index, &args)) {
@@ -124,11 +127,10 @@ static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
for (i = 0; i < mc->soc->num_clients; i++) {
const struct tegra_mc_client *client = &mc->soc->clients[i];
if (client->id == args.args[0]) {
u32 sid = fwspec->ids[0] & MC_SID_STREAMID_OVERRIDE_MASK;
tegra186_mc_client_sid_override(mc, client, sid);
}
if (client->id == args.args[0])
tegra186_mc_client_sid_override(
mc, client,
sid & MC_SID_STREAMID_OVERRIDE_MASK);
}
}


@@ -93,12 +93,12 @@ of_dma_set_restricted_buffer(struct device *dev, struct device_node *np)
int of_dma_configure_id(struct device *dev, struct device_node *np,
bool force_dma, const u32 *id)
{
const struct iommu_ops *iommu;
const struct bus_dma_region *map = NULL;
struct device_node *bus_np;
u64 dma_start = 0;
u64 mask, end, size = 0;
bool coherent;
int iommu_ret;
int ret;
if (np == dev->of_node)
@@ -181,21 +181,29 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
dev_dbg(dev, "device is%sdma coherent\n",
coherent ? " " : " not ");
iommu = of_iommu_configure(dev, np, id);
if (PTR_ERR(iommu) == -EPROBE_DEFER) {
iommu_ret = of_iommu_configure(dev, np, id);
if (iommu_ret == -EPROBE_DEFER) {
/* Don't touch range map if it wasn't set from a valid dma-ranges */
if (!ret)
dev->dma_range_map = NULL;
kfree(map);
return -EPROBE_DEFER;
}
dev_dbg(dev, "device is%sbehind an iommu\n",
iommu ? " " : " not ");
} else if (iommu_ret == -ENODEV) {
dev_dbg(dev, "device is not behind an iommu\n");
} else if (iommu_ret) {
dev_err(dev, "iommu configuration for device failed with %pe\n",
ERR_PTR(iommu_ret));
/*
* Historically this routine doesn't fail driver probing
* due to errors in of_iommu_configure()
*/
} else
dev_dbg(dev, "device is behind an iommu\n");
arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
if (!iommu)
arch_setup_dma_ops(dev, dma_start, size, coherent);
if (iommu_ret)
of_dma_set_restricted_buffer(dev, np);
return 0;


@@ -719,6 +719,8 @@ struct acpi_pci_root {
/* helper */
struct iommu_ops;
bool acpi_dma_supported(const struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
int acpi_iommu_fwspec_init(struct device *dev, u32 id,


@@ -42,7 +42,6 @@ struct class;
struct subsys_private;
struct device_node;
struct fwnode_handle;
struct iommu_ops;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;


@@ -62,9 +62,6 @@ struct fwnode_handle;
* this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
* driver implementations to a bus and allow the driver to do
* bus-specific setup
* @need_parent_lock: When probing or removing a device on this bus, the
* device core should lock the device's parent.
*
@@ -104,8 +101,6 @@ struct bus_type {
const struct dev_pm_ops *pm;
const struct iommu_ops *iommu_ops;
bool need_parent_lock;
};


@@ -11,6 +11,7 @@
#include <linux/slab.h>
struct cma;
struct iommu_ops;
/*
* Values for struct dma_map_ops.flags:
@@ -426,10 +427,10 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent);
bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
u64 size, const struct iommu_ops *iommu, bool coherent)
u64 size, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */


@@ -100,6 +100,30 @@ struct io_pgtable_cfg {
const struct iommu_flush_ops *tlb;
struct device *iommu_dev;
/**
* @alloc: Custom page allocator.
*
* Optional hook used to allocate page tables. If this function is NULL,
* @free must be NULL too.
*
* Memory returned should be zeroed and suitable for dma_map_single() and
* virt_to_phys().
*
* Not all formats support custom page allocators. Before considering
* passing a non-NULL value, make sure the chosen page format supports
* this feature.
*/
void *(*alloc)(void *cookie, size_t size, gfp_t gfp);
/**
* @free: Custom page de-allocator.
*
* Optional hook used to free page tables allocated with the @alloc
* hook. Must be non-NULL if @alloc is not NULL, must be NULL
* otherwise.
*/
void (*free)(void *cookie, void *pages, size_t size);
/* Low-level data specific to the table format */
union {
struct {
@@ -241,16 +265,26 @@ io_pgtable_tlb_add_page(struct io_pgtable *iop,
iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
}
/**
* enum io_pgtable_caps - IO page table backend capabilities.
*/
enum io_pgtable_caps {
/** @IO_PGTABLE_CAP_CUSTOM_ALLOCATOR: Backend accepts custom page table allocators. */
IO_PGTABLE_CAP_CUSTOM_ALLOCATOR = BIT(0),
};
/**
* struct io_pgtable_init_fns - Alloc/free a set of page tables for a
* particular format.
*
* @alloc: Allocate a set of page tables described by cfg.
* @free: Free the page tables associated with iop.
* @caps: Combination of @io_pgtable_caps flags encoding the backend capabilities.
*/
struct io_pgtable_init_fns {
struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
void (*free)(struct io_pgtable *iop);
u32 caps;
};
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;


@ -106,7 +106,7 @@ struct iommu_domain {
unsigned type; unsigned type;
const struct iommu_domain_ops *ops; const struct iommu_domain_ops *ops;
const struct iommu_dirty_ops *dirty_ops; const struct iommu_dirty_ops *dirty_ops;
const struct iommu_ops *owner; /* Whose domain_alloc we came from */
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
struct iommu_domain_geometry geometry; struct iommu_domain_geometry geometry;
struct iommu_dma_cookie *iova_cookie; struct iommu_dma_cookie *iova_cookie;
@ -121,6 +121,11 @@ struct iommu_domain {
struct { /* IOMMU_DOMAIN_SVA */ struct { /* IOMMU_DOMAIN_SVA */
struct mm_struct *mm; struct mm_struct *mm;
int users; int users;
/*
* Next iommu_domain in mm->iommu_mm->sva-domains list
* protected by iommu_sva_lock.
*/
struct list_head next;
}; };
}; };
}; };
@ -812,6 +817,11 @@ struct iommu_sva {
struct iommu_domain *domain; struct iommu_domain *domain;
}; };
struct iommu_mm_data {
u32 pasid;
struct list_head sva_domains;
};
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops); const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev); void iommu_fwspec_free(struct device *dev);
@ -840,10 +850,7 @@ static inline void *dev_iommu_priv_get(struct device *dev)
return NULL; return NULL;
} }
static inline void dev_iommu_priv_set(struct device *dev, void *priv) void dev_iommu_priv_set(struct device *dev, void *priv);
{
dev->iommu->priv = priv;
}
extern struct mutex iommu_probe_device_lock; extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev); int iommu_probe_device(struct device *dev);
@@ -1337,15 +1344,33 @@ static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream
 	return false;
 }

-#ifdef CONFIG_IOMMU_SVA
+#ifdef CONFIG_IOMMU_MM_DATA
 static inline void mm_pasid_init(struct mm_struct *mm)
 {
-	mm->pasid = IOMMU_PASID_INVALID;
+	/*
+	 * During dup_mm(), a new mm will be memcpy'd from an old one and that makes
+	 * the new mm and the old one point to a same iommu_mm instance. When either
+	 * one of the two mms gets released, the iommu_mm instance is freed, leaving
+	 * the other mm running into a use-after-free/double-free problem. To avoid
+	 * the problem, zeroing the iommu_mm pointer of a new mm is needed here.
+	 */
+	mm->iommu_mm = NULL;
 }
 static inline bool mm_valid_pasid(struct mm_struct *mm)
 {
-	return mm->pasid != IOMMU_PASID_INVALID;
+	return READ_ONCE(mm->iommu_mm);
 }
+static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
+{
+	struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);
+
+	if (!iommu_mm)
+		return IOMMU_PASID_INVALID;
+	return iommu_mm->pasid;
+}
 void mm_pasid_drop(struct mm_struct *mm);
 struct iommu_sva *iommu_sva_bind_device(struct device *dev,
 					struct mm_struct *mm);
@@ -1368,6 +1393,12 @@ static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
 }
 static inline void mm_pasid_init(struct mm_struct *mm) {}
 static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
+static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
+{
+	return IOMMU_PASID_INVALID;
+}
 static inline void mm_pasid_drop(struct mm_struct *mm) {}
 #endif /* CONFIG_IOMMU_SVA */
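With the PASID now stored in the per-mm iommu_mm_data rather than directly in mm_struct, mm_get_enqcmd_pasid() becomes the accessor for ENQCMD-style users. A hedged sketch of a driver binding the current process for SVA and programming the PASID into its hardware; my_hw_set_pasid() and my_enable_sva() are placeholders, not kernel APIs:

/*
 * Illustrative SVA consumer: bind the current mm, read back the PASID
 * and hand it to device-specific setup.
 */
static int my_enable_sva(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;

	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	my_hw_set_pasid(dev, pasid);	/* placeholder for device-specific code */
	return 0;
}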

--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -730,6 +730,7 @@ struct mm_cid {
 #endif

 struct kioctx_table;
+struct iommu_mm_data;
 struct mm_struct {
 	struct {
 		/*
@@ -941,8 +942,8 @@ struct mm_struct {
 #endif
 		struct work_struct async_put_work;

-#ifdef CONFIG_IOMMU_SVA
-		u32 pasid;
+#ifdef CONFIG_IOMMU_MM_DATA
+		struct iommu_mm_data *iommu_mm;
 #endif
 #ifdef CONFIG_KSM
 		/*

--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -8,20 +8,19 @@ struct iommu_ops;
 #ifdef CONFIG_OF_IOMMU

-extern const struct iommu_ops *of_iommu_configure(struct device *dev,
-					struct device_node *master_np,
-					const u32 *id);
+extern int of_iommu_configure(struct device *dev, struct device_node *master_np,
+			      const u32 *id);

 extern void of_iommu_get_resv_regions(struct device *dev,
 				      struct list_head *list);

 #else

-static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
+static inline int of_iommu_configure(struct device *dev,
 					 struct device_node *master_np,
 					 const u32 *id)
 {
-	return NULL;
+	return -ENODEV;
 }

 static inline void of_iommu_get_resv_regions(struct device *dev,
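Returning an errno instead of an ops pointer lets the DT glue distinguish "no IOMMU" from real failures and, in particular, propagate probe deferral. Roughly, a caller on the of_dma_configure() path would now do something like the following; the exact error policy and the my_* name are assumptions of this sketch:

/* Sketch of a firmware-glue caller; error policy is assumed, not quoted. */
static int my_setup_iommu_from_dt(struct device *dev, struct device_node *np,
				  const u32 *id)
{
	int ret = of_iommu_configure(dev, np, id);

	if (ret == -EPROBE_DEFER)
		return ret;		/* IOMMU driver not bound yet, retry later */
	if (ret == -ENODEV)
		return 0;		/* device is not behind an IOMMU */
	if (ret)
		dev_dbg(dev, "IOMMU configuration failed (%d)\n", ret);

	return ret;
}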

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -957,7 +957,7 @@ struct task_struct {
 	/* Recursion prevention for eventfd_signal() */
 	unsigned in_eventfd:1;
 #endif
-#ifdef CONFIG_IOMMU_SVA
+#ifdef CONFIG_ARCH_HAS_CPU_PASID
 	unsigned pasid_activated:1;
 #endif
 #ifdef CONFIG_CPU_SUP_INTEL

--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1173,7 +1173,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->use_memdelay = 0;
 #endif

-#ifdef CONFIG_IOMMU_SVA
+#ifdef CONFIG_ARCH_HAS_CPU_PASID
 	tsk->pasid_activated = 0;
 #endif

--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1258,6 +1258,9 @@ config LOCK_MM_AND_FIND_VMA
 	bool
 	depends on !STACK_GROWSUP

+config IOMMU_MM_DATA
+	bool
+
 source "mm/damon/Kconfig"

 endmenu

--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -44,9 +44,6 @@ struct mm_struct init_mm = {
 #endif
 	.user_ns	= &init_user_ns,
 	.cpu_bitmap	= CPU_BITS_NONE,
-#ifdef CONFIG_IOMMU_SVA
-	.pasid = IOMMU_PASID_INVALID,
-#endif
 	INIT_MM_CONTEXT(init_mm)
 };