iommu/vt-d: Drain PRQs when domain removed from RID

As this iommu driver now supports page faults for requests without
PASID, page requests should be drained when a domain is removed from
the RID2PASID entry.

The intel_iommu_drain_pasid_prq() call is therefore moved into
intel_pasid_tear_down_entry(). As a result, whenever a translation is
removed from any PASID entry and PRI is enabled on the device, page
requests are drained in the domain detachment path.

The intel_iommu_drain_pasid_prq() helper is extended to issue device
TLB invalidation requests for both the PASID and non-PASID cases.
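
For illustration, here is a minimal, self-contained user-space C sketch of the
behaviour described above; it is not the driver's code. Only IOMMU_NO_PASID and
the overall branch structure mirror the patch, while every other name
(dev_info, build_*_iotlb(), tear_down_pasid_entry(), ...) is a made-up stand-in
for the corresponding kernel helper. It models the two points of the change:
the drain is gated only on the device's PRI state, so tearing down the
RID2PASID entry drains page requests just like tearing down an SVA PASID, and
the IOTLB/device-TLB descriptors differ between the two cases.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IOMMU_NO_PASID 0u		/* requests that carry no PASID (RID2PASID) */

struct qi_desc { uint64_t qw0, qw1; };	/* simplified invalidation descriptor */
struct dev_info { bool pri_enabled; };	/* stand-in for device_domain_info */

/* Stand-ins for the qi_desc_iotlb()/qi_desc_piotlb() descriptor builders. */
static void build_dsi_iotlb(struct qi_desc *d) { d->qw0 = 1; }
static void build_dev_iotlb(struct qi_desc *d) { d->qw0 = 2; }
static void build_pasid_iotlb(uint32_t pasid, struct qi_desc *d) { d->qw0 = 3; d->qw1 = pasid; }
static void build_pasid_dev_iotlb(uint32_t pasid, struct qi_desc *d) { d->qw0 = 4; d->qw1 = pasid; }

/* desc[0] stays an invalidation-wait descriptor; desc[1]/desc[2] depend on the PASID. */
static void drain_prq(struct dev_info *info, uint32_t pasid)
{
	struct qi_desc desc[3];

	if (!info->pri_enabled)
		return;			/* no PRI, nothing can be queued */

	memset(desc, 0, sizeof(desc));
	if (pasid == IOMMU_NO_PASID) {
		build_dsi_iotlb(&desc[1]);	/* domain-selective IOTLB flush */
		build_dev_iotlb(&desc[2]);	/* non-PASID device-TLB flush */
	} else {
		build_pasid_iotlb(pasid, &desc[1]);
		build_pasid_dev_iotlb(pasid, &desc[2]);
	}
	printf("drain pasid %u: desc1=%llu desc2=%llu\n", (unsigned int)pasid,
	       (unsigned long long)desc[1].qw0, (unsigned long long)desc[2].qw0);
}

/* Every PASID-entry teardown now ends with a drain, as in the
 * intel_pasid_tear_down_entry() hunk below. */
static void tear_down_pasid_entry(struct dev_info *info, uint32_t pasid)
{
	/* ...clear the PASID entry, flush PASID cache, IOTLB and device TLB... */
	drain_prq(info, pasid);
}

int main(void)
{
	struct dev_info info = { .pri_enabled = true };

	tear_down_pasid_entry(&info, 10);		/* regular PASID */
	tear_down_pasid_entry(&info, IOMMU_NO_PASID);	/* domain removed from RID */
	return 0;
}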

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Yi Liu <yi.l.liu@intel.com>
Link: https://lore.kernel.org/r/20241101045543.70086-1-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>

commit c43e1ccdeb (parent 9baed1c280)
Author: Lu Baolu, 2024-11-04 09:40:39 +08:00 (committed by Joerg Roedel)
3 changed files with 10 additions and 18 deletions

drivers/iommu/intel/iommu.c

@@ -4067,7 +4067,6 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
 	intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
 	kfree(dev_pasid);
 	intel_pasid_tear_down_entry(iommu, dev, pasid, false);
-	intel_iommu_drain_pasid_prq(dev, pasid);
 }
 
 static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,

drivers/iommu/intel/pasid.c

@@ -265,6 +265,7 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
 		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
 
 	devtlb_invalidation_with_pasid(iommu, dev, pasid);
+	intel_iommu_drain_pasid_prq(dev, pasid);
 }
 
 /*

drivers/iommu/intel/prq.c

@@ -63,26 +63,18 @@ void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid)
 	struct dmar_domain *domain;
 	struct intel_iommu *iommu;
 	struct qi_desc desc[3];
-	struct pci_dev *pdev;
 	int head, tail;
 	u16 sid, did;
-	int qdep;
 
 	info = dev_iommu_priv_get(dev);
-	if (WARN_ON(!info || !dev_is_pci(dev)))
-		return;
-
 	if (!info->pri_enabled)
 		return;
 
 	iommu = info->iommu;
 	domain = info->domain;
-	pdev = to_pci_dev(dev);
 	sid = PCI_DEVID(info->bus, info->devfn);
 	did = domain ? domain_id_iommu(domain, iommu) : FLPT_DEFAULT_DID;
-	qdep = pci_ats_queue_depth(pdev);
 
 	/*
 	 * Check and wait until all pending page requests in the queue are
 	 * handled by the prq handling thread.
@@ -114,15 +106,15 @@ void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid)
 	desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
 		      QI_IWD_FENCE |
 		      QI_IWD_TYPE;
-	desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
-		      QI_EIOTLB_DID(did) |
-		      QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
-		      QI_EIOTLB_TYPE;
-	desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
-		      QI_DEV_EIOTLB_SID(sid) |
-		      QI_DEV_EIOTLB_QDEP(qdep) |
-		      QI_DEIOTLB_TYPE |
-		      QI_DEV_IOTLB_PFSID(info->pfsid);
+	if (pasid == IOMMU_NO_PASID) {
+		qi_desc_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, &desc[1]);
+		qi_desc_dev_iotlb(sid, info->pfsid, info->ats_qdep, 0,
+				  MAX_AGAW_PFN_WIDTH, &desc[2]);
+	} else {
+		qi_desc_piotlb(did, pasid, 0, -1, 0, &desc[1]);
+		qi_desc_dev_iotlb_pasid(sid, info->pfsid, pasid, info->ats_qdep,
+					0, MAX_AGAW_PFN_WIDTH, &desc[2]);
+	}
 qi_retry:
 	reinit_completion(&iommu->prq_complete);
 	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
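
The hunk above only changes how desc[1] and desc[2] are built; the surrounding
drain sequence is untouched: wait until the PRQ handling thread has consumed
everything already in the hardware queue, then submit the fenced wait
descriptor plus the two flushes with QI_OPT_WAIT_DRAIN, retrying as needed.
Below is a loose user-space C model of that two-phase flow; the queue registers
and the completion are mocked with plain variables, and none of the names are
the driver's real interfaces.

#include <stdbool.h>
#include <stdio.h>

/* Mocked page-request queue state; the driver reads DMAR_PQH/DMAR_PQT. */
static int prq_head, prq_tail;
static bool drain_response_pending;

static void wait_for_prq_thread(void)
{
	/* stands in for wait_for_completion(&iommu->prq_complete) */
	prq_head = prq_tail;		/* pretend the thread caught up */
}

static void submit_drain_descriptors(void)
{
	/* stands in for qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN) */
	drain_response_pending = false;	/* pretend hardware completed the drain */
}

static void drain_page_requests(void)
{
	/* Phase 1: wait until pending page requests were handled by the prq thread. */
	while (prq_head != prq_tail)
		wait_for_prq_thread();

	/* Phase 2: fence + IOTLB + device-TLB flush; retry while a response is outstanding. */
	do {
		submit_drain_descriptors();
	} while (drain_response_pending);
}

int main(void)
{
	prq_head = 0;
	prq_tail = 4;			/* pretend four requests are still queued */
	drain_response_pending = true;
	drain_page_requests();
	printf("drained: head=%d tail=%d\n", prq_head, prq_tail);
	return 0;
}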