IOMMU Fixes for Linux v6.13-rc2
Including:

  - Per-domain device-list locking fixes for the AMD IOMMU driver.

  - Fix incorrect use of smp_processor_id() in the NVidia-specific part
    of the ARM-SMMU-v3 driver.

  - Intel IOMMU driver fixes:
    - Remove cache tags before disabling ATS.
    - Avoid draining PRQ in sva mm release path.
    - Fix qi_batch NULL pointer with nested parent domain.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAmddTjQACgkQK/BELZcB
GuPJUQ/9GPxzNkavRWH4J9eCwjoQBJsquRuabfdhvOFUVdWoNSeyyHWZInic/RV8
ssUE0Vzk7nsdw1mbrtgTdGqkia/VX6Y2EGnFPQG3m+BNYw1pL91eO4oqt7dc4qMX
tmR6EQeOBI9JbgszLXXK5UDSV/mP71eFLCvKiGco77+jnUJn4g8nlJoskeP7eX7S
pR1bzrgF0vSEeDAJ5PgWjddJsrO6CCbtOEogFovQjDIMCG5zz1pAEV+MK/wWwtVO
qFY8QxwyNK5PiS9/SZvN3PKxHvdD1QQEZVgLw1NfNbG3/G5SX7Wv0DPjK02o2gAI
QjjeyB3QrxZp1dl4ZY/BuQuZx3zGNoYXW9zEQHAtcp7mHYZMXsEUe31XQ59u/YhX
vUnP9dPKZLoH0gaBN3QVkj8Ajw6NNsEAt0qFFD6r4MFm8/weAvEZfjoNa3v7Mqqs
8XPQeJV1fiLTnMgboQ5nxEIozn4eI2MM4VxpzpfhcQmpy8gYBTQsv2qNPZZiXVOv
jrLp2HWKa+nTZlVqKEGgnHC4C3A1EDOSE5kB5UaexPaisoj7G5yshuNBGVRYfB2v
zdFshEeKFaH1I7Uh8B0yjlqNZFnRzPLF3GCa/slH5QthBNVGBKGSGcfVakminrDn
rmEioHmNUnyhLGNM+y3Xu3XwEMfSXeh+VAEhPBkrTrfMjINbF88=
=B8li
-----END PGP SIGNATURE-----

Merge tag 'iommu-fixes-v6.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux

Pull iommu fixes from Joerg Roedel:

 - Per-domain device-list locking fixes for the AMD IOMMU driver

 - Fix incorrect use of smp_processor_id() in the NVidia-specific part of
   the ARM-SMMU-v3 driver

 - Intel IOMMU driver fixes:
     - Remove cache tags before disabling ATS
     - Avoid draining PRQ in sva mm release path
     - Fix qi_batch NULL pointer with nested parent domain

* tag 'iommu-fixes-v6.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux:
  iommu/vt-d: Avoid draining PRQ in sva mm release path
  iommu/vt-d: Fix qi_batch NULL pointer with nested parent domain
  iommu/vt-d: Remove cache tags before disabling ATS
  iommu/amd: Add lockdep asserts for domain->dev_list
  iommu/amd: Put list_add/del(dev_data) back under the domain->lock
  iommu/tegra241-cmdqv: do not use smp_processor_id in preemptible context
commit 115c0cc251
@@ -1415,6 +1415,7 @@ static int domain_flush_pages_v2(struct protection_domain *pdom,
 	struct iommu_cmd cmd;
 	int ret = 0;
 
+	lockdep_assert_held(&pdom->lock);
 	list_for_each_entry(dev_data, &pdom->dev_list, list) {
 		struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
 		u16 domid = dev_data->gcr3_info.domid;
@@ -1464,6 +1465,8 @@ static void __domain_flush_pages(struct protection_domain *domain,
 	ioasid_t pasid = IOMMU_NO_PASID;
 	bool gn = false;
 
+	lockdep_assert_held(&domain->lock);
+
 	if (pdom_is_v2_pgtbl_mode(domain)) {
 		gn = true;
 		ret = domain_flush_pages_v2(domain, address, size);
@@ -1585,6 +1588,8 @@ void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
 {
 	struct iommu_dev_data *dev_data;
 
+	lockdep_assert_held(&domain->lock);
+
 	list_for_each_entry(dev_data, &domain->dev_list, list) {
 		struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
 
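Note on the three hunks above: they only add assertions in the AMD IOMMU flush paths and change no locking. Below is a minimal, illustrative sketch (all names hypothetical, not from the driver) of how lockdep_assert_held() turns the "callers must hold the domain lock while walking dev_list" rule into something CONFIG_PROVE_LOCKING can check at runtime:

/* Illustrative sketch only; not part of the patch. Assumes a domain
 * with a spinlock-protected device list, as in the AMD IOMMU driver.
 */
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct example_domain {
	spinlock_t lock;
	struct list_head dev_list;
};

struct example_dev_data {
	struct list_head list;
};

/* Hypothetical helper: callers must hold domain->lock. With
 * CONFIG_PROVE_LOCKING the assertion splats if a caller walks the list
 * without the lock; without lockdep it compiles away to nothing.
 */
static void example_for_each_dev(struct example_domain *domain,
				 void (*fn)(struct example_dev_data *))
{
	struct example_dev_data *dev_data;

	lockdep_assert_held(&domain->lock);

	list_for_each_entry(dev_data, &domain->dev_list, list)
		fn(dev_data);
}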
@@ -2073,6 +2078,7 @@ static int attach_device(struct device *dev,
 	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
 	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
 	struct pci_dev *pdev;
+	unsigned long flags;
 	int ret = 0;
 
 	mutex_lock(&dev_data->mutex);
@@ -2113,7 +2119,9 @@ static int attach_device(struct device *dev,
 
 	/* Update data structures */
 	dev_data->domain = domain;
+	spin_lock_irqsave(&domain->lock, flags);
 	list_add(&dev_data->list, &domain->dev_list);
+	spin_unlock_irqrestore(&domain->lock, flags);
 
 	/* Update device table */
 	dev_update_dte(dev_data, true);
@@ -2160,6 +2168,7 @@ static void detach_device(struct device *dev)
 	/* Flush IOTLB and wait for the flushes to finish */
 	spin_lock_irqsave(&domain->lock, flags);
 	amd_iommu_domain_flush_all(domain);
+	list_del(&dev_data->list);
 	spin_unlock_irqrestore(&domain->lock, flags);
 
 	/* Clear GCR3 table */
@@ -2168,7 +2177,6 @@ static void detach_device(struct device *dev)
 
 	/* Update data structures */
 	dev_data->domain = NULL;
-	list_del(&dev_data->list);
 
 	/* decrease reference counters - needs to happen after the flushes */
 	pdom_detach_iommu(iommu, domain);
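The four hunks above put the dev_list manipulation back under domain->lock: attach_device() now wraps list_add() in spin_lock_irqsave()/spin_unlock_irqrestore(), and detach_device() removes the entry inside the same locked section that flushes the domain instead of afterwards. A minimal sketch of the pattern, reusing the hypothetical types from the previous sketch:

/* Illustrative sketch; example_attach()/example_detach() are
 * hypothetical, not the driver's functions.
 */
static void example_attach(struct example_domain *domain,
			   struct example_dev_data *dev_data)
{
	unsigned long flags;

	/* Publish the device on the domain list only while holding the
	 * domain lock, so list walkers (flush paths) always see a
	 * consistent list.
	 */
	spin_lock_irqsave(&domain->lock, flags);
	list_add(&dev_data->list, &domain->dev_list);
	spin_unlock_irqrestore(&domain->lock, flags);
}

static void example_detach(struct example_domain *domain,
			   struct example_dev_data *dev_data)
{
	unsigned long flags;

	/* Remove the entry in the same critical section that performs the
	 * final flush, mirroring what detach_device() does above.
	 */
	spin_lock_irqsave(&domain->lock, flags);
	/* ... flush the domain here ... */
	list_del(&dev_data->list);
	spin_unlock_irqrestore(&domain->lock, flags);
}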
@@ -339,7 +339,7 @@ tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
 	 * one CPU at a time can enter the process, while the others
 	 * will be spinning at the same lock.
 	 */
-	lidx = smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
+	lidx = raw_smp_processor_id() % cmdqv->num_lvcmdqs_per_vintf;
 	vcmdq = vintf->lvcmdqs[lidx];
 	if (!vcmdq || !READ_ONCE(vcmdq->enabled))
 		return NULL;
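Context for the one-line change above: smp_processor_id() triggers a debug warning when called from preemptible context, because the caller may migrate to another CPU right after the read. raw_smp_processor_id() performs the same read without the check, which is acceptable here because the CPU number is only used to pick a logical VCMDQ; a stale value merely spreads load a little less evenly. A standalone sketch of that idea, with hypothetical names:

/* Illustrative sketch: pick a queue by current CPU purely as a
 * load-spreading hint. Names are hypothetical, not from the driver.
 */
#include <linux/smp.h>
#include <linux/types.h>

struct example_queue {
	bool enabled;
};

static struct example_queue *
example_pick_queue(struct example_queue **queues, unsigned int nr_queues)
{
	unsigned int idx;

	/* raw_smp_processor_id() skips the preemption check; migrating to
	 * another CPU after this read only makes the spread slightly less
	 * even, never incorrect.
	 */
	idx = raw_smp_processor_id() % nr_queues;

	return queues[idx];
}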
@@ -105,12 +105,35 @@ static void cache_tag_unassign(struct dmar_domain *domain, u16 did,
 	spin_unlock_irqrestore(&domain->cache_lock, flags);
 }
 
+/* domain->qi_batch will be freed in iommu_free_domain() path. */
+static int domain_qi_batch_alloc(struct dmar_domain *domain)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&domain->cache_lock, flags);
+	if (domain->qi_batch)
+		goto out_unlock;
+
+	domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_ATOMIC);
+	if (!domain->qi_batch)
+		ret = -ENOMEM;
+out_unlock:
+	spin_unlock_irqrestore(&domain->cache_lock, flags);
+
+	return ret;
+}
+
 static int __cache_tag_assign_domain(struct dmar_domain *domain, u16 did,
 				     struct device *dev, ioasid_t pasid)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	int ret;
 
+	ret = domain_qi_batch_alloc(domain);
+	if (ret)
+		return ret;
+
 	ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_IOTLB);
 	if (ret || !info->ats_enabled)
 		return ret;
@@ -139,6 +162,10 @@ static int __cache_tag_assign_parent_domain(struct dmar_domain *domain, u16 did,
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	int ret;
 
+	ret = domain_qi_batch_alloc(domain);
+	if (ret)
+		return ret;
+
 	ret = cache_tag_assign(domain, did, dev, pasid, CACHE_TAG_NESTING_IOTLB);
 	if (ret || !info->ats_enabled)
 		return ret;
@@ -190,13 +217,6 @@ int cache_tag_assign_domain(struct dmar_domain *domain,
 	u16 did = domain_get_id_for_dev(domain, dev);
 	int ret;
 
-	/* domain->qi_bach will be freed in iommu_free_domain() path. */
-	if (!domain->qi_batch) {
-		domain->qi_batch = kzalloc(sizeof(*domain->qi_batch), GFP_KERNEL);
-		if (!domain->qi_batch)
-			return -ENOMEM;
-	}
-
 	ret = __cache_tag_assign_domain(domain, did, dev, pasid);
 	if (ret || domain->domain.type != IOMMU_DOMAIN_NESTED)
 		return ret;
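Taken together, the three hunks above fix the qi_batch NULL-pointer case: previously only cache_tag_assign_domain() allocated domain->qi_batch (with GFP_KERNEL, outside the lock), so a nested parent domain whose tags are assigned via __cache_tag_assign_parent_domain() could be left without one. The new domain_qi_batch_alloc() helper runs on both paths and allocates under cache_lock, hence GFP_ATOMIC. A minimal sketch of that lazy allocate-once-under-spinlock pattern, with hypothetical types:

/* Illustrative sketch of race-free lazy allocation of a per-domain
 * object under a spinlock; mirrors the shape of domain_qi_batch_alloc().
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_batch {
	unsigned int nr;
};

struct example_cache_domain {
	spinlock_t cache_lock;
	struct example_batch *batch;
};

static int example_batch_alloc(struct example_cache_domain *domain)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&domain->cache_lock, flags);
	if (domain->batch)	/* already allocated by another path */
		goto out_unlock;

	/* GFP_ATOMIC: we may not sleep while holding the spinlock. */
	domain->batch = kzalloc(sizeof(*domain->batch), GFP_ATOMIC);
	if (!domain->batch)
		ret = -ENOMEM;
out_unlock:
	spin_unlock_irqrestore(&domain->cache_lock, flags);

	return ret;
}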
@@ -3220,6 +3220,9 @@ void device_block_translation(struct device *dev)
 	struct intel_iommu *iommu = info->iommu;
 	unsigned long flags;
 
+	if (info->domain)
+		cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
+
 	iommu_disable_pci_caps(info);
 	if (!dev_is_real_dma_subdevice(dev)) {
 		if (sm_supported(iommu))
@@ -3236,7 +3239,6 @@ void device_block_translation(struct device *dev)
 		list_del(&info->link);
 		spin_unlock_irqrestore(&info->domain->lock, flags);
 
-		cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
 		domain_detach_iommu(info->domain, iommu);
 		info->domain = NULL;
 	}
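The two hunks above reorder device_block_translation(): the domain's cache tags are now unassigned before iommu_disable_pci_caps() rather than after it, so the tags tied to the device's ATS state are dropped while ATS is still enabled, matching the "remove cache tags before disabling ATS" fix in the merge. A small sketch of the intended ordering, with purely hypothetical helper names:

/* Illustrative sketch of the required teardown order; all names here
 * are hypothetical stand-ins, not the driver's functions.
 */
struct example_device { int dummy; };

static void example_unassign_cache_tags(struct example_device *dev) { }
static void example_disable_pci_caps(struct example_device *dev) { }
static void example_detach_domain(struct example_device *dev) { }

static void example_block_translation(struct example_device *dev)
{
	/* Drop cache tags first: any device-TLB invalidation they trigger
	 * must be issued while the endpoint still has ATS enabled.
	 */
	example_unassign_cache_tags(dev);

	/* Only then disable ATS and the other PCI capabilities. */
	example_disable_pci_caps(dev);

	/* Finally detach the device from its old domain. */
	example_detach_domain(dev);
}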
@@ -265,7 +265,8 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
 		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
 
 	devtlb_invalidation_with_pasid(iommu, dev, pasid);
-	intel_iommu_drain_pasid_prq(dev, pasid);
+	if (!fault_ignore)
+		intel_iommu_drain_pasid_prq(dev, pasid);
 }
 
 /*
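The final hunk makes the page-request-queue drain in intel_pasid_tear_down_entry() conditional: it is skipped when fault_ignore is set, which is how the SVA mm-release path tears down the PASID entry (the "avoid draining PRQ in sva mm release path" fix). A minimal sketch of the shape of that change, with hypothetical names:

/* Illustrative sketch: only drain pending page requests when the caller
 * still cares about faults. Names are hypothetical.
 */
#include <linux/types.h>

struct example_iommu;
struct example_dev;

static void example_flush_pasid(struct example_iommu *iommu, u32 pasid) { }
static void example_drain_prq(struct example_dev *dev, u32 pasid) { }

static void example_tear_down_pasid(struct example_iommu *iommu,
				    struct example_dev *dev, u32 pasid,
				    bool fault_ignore)
{
	/* Always invalidate translations for the PASID being torn down. */
	example_flush_pasid(iommu, pasid);

	/* Skip draining the page request queue when faults are being
	 * ignored, as intel_pasid_tear_down_entry() now does above.
	 */
	if (!fault_ignore)
		example_drain_prq(dev, pasid);
}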