mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-01 02:33:57 +00:00
iommu: Use EINVAL for incompatible device/domain in ->attach_dev
Following the new rules in include/linux/iommu.h kdocs, update all drivers ->attach_dev callback functions to return EINVAL in the failure paths that are related to domain incompatibility. Also, drop adjacent error prints to prevent kernel log spam. Link: https://lore.kernel.org/r/f52a07f7320da94afe575c9631340d0019a203a7.1666042873.git.nicolinc@nvidia.com Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org> Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com> Reviewed-by: Kevin Tian <kevin.tian@intel.com> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Signed-off-by: Nicolin Chen <nicolinc@nvidia.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent
bd7ebb7719
commit
f4a1477357
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2430,23 +2430,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 			goto out_unlock;
 		}
 	} else if (smmu_domain->smmu != smmu) {
-		dev_err(dev,
-			"cannot attach to SMMU %s (upstream of %s)\n",
-			dev_name(smmu_domain->smmu->dev),
-			dev_name(smmu->dev));
-		ret = -ENXIO;
+		ret = -EINVAL;
 		goto out_unlock;
 	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
 		   master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
-		dev_err(dev,
-			"cannot attach to incompatible domain (%u SSID bits != %u)\n",
-			smmu_domain->s1_cfg.s1cdmax, master->ssid_bits);
 		ret = -EINVAL;
 		goto out_unlock;
 	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
 		   smmu_domain->stall_enabled != master->stall_enabled) {
-		dev_err(dev, "cannot attach to stall-%s domain\n",
-			smmu_domain->stall_enabled ? "enabled" : "disabled");
 		ret = -EINVAL;
 		goto out_unlock;
 	}
drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1150,9 +1150,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	 * different SMMUs.
 	 */
 	if (smmu_domain->smmu != smmu) {
-		dev_err(dev,
-			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
-			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
 		ret = -EINVAL;
 		goto rpm_put;
 	}
drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -381,13 +381,8 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
 	 * Sanity check the domain. We don't support domains across
 	 * different IOMMUs.
 	 */
-	if (qcom_domain->iommu != qcom_iommu) {
-		dev_err(dev, "cannot attach to IOMMU %s while already "
-			"attached to domain on IOMMU %s\n",
-			dev_name(qcom_domain->iommu->dev),
-			dev_name(qcom_iommu->dev));
+	if (qcom_domain->iommu != qcom_iommu)
 		return -EINVAL;
-	}
 
 	return 0;
 }
drivers/iommu/intel/iommu.c
@@ -4194,19 +4194,15 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
 		return -ENODEV;
 
 	if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
-		return -EOPNOTSUPP;
+		return -EINVAL;
 
 	/* check if this iommu agaw is sufficient for max mapped address */
 	addr_width = agaw_to_width(iommu->agaw);
 	if (addr_width > cap_mgaw(iommu->cap))
 		addr_width = cap_mgaw(iommu->cap);
 
-	if (dmar_domain->max_addr > (1LL << addr_width)) {
-		dev_err(dev, "%s: iommu width (%d) is not "
-			"sufficient for the mapped address (%llx)\n",
-			__func__, addr_width, dmar_domain->max_addr);
-		return -EFAULT;
-	}
+	if (dmar_domain->max_addr > (1LL << addr_width))
+		return -EINVAL;
 	dmar_domain->gaw = addr_width;
 
 	/*
drivers/iommu/ipmmu-vmsa.c
@@ -628,8 +628,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 		 * Something is wrong, we can't attach two devices using
 		 * different IOMMUs to the same domain.
 		 */
-		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
-			dev_name(mmu->dev), dev_name(domain->mmu->dev));
 		ret = -EINVAL;
 	} else
 		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
drivers/iommu/omap-iommu.c
@@ -1472,7 +1472,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	/* only a single client device can be attached to a domain */
 	if (omap_domain->dev) {
 		dev_err(dev, "iommu domain is already attached\n");
-		ret = -EBUSY;
+		ret = -EINVAL;
 		goto out;
 	}
 
drivers/iommu/sprd-iommu.c
@@ -237,10 +237,8 @@ static int sprd_iommu_attach_device(struct iommu_domain *domain,
 	struct sprd_iommu_domain *dom = to_sprd_domain(domain);
 	size_t pgt_size = sprd_iommu_pgt_size(domain);
 
-	if (dom->sdev) {
-		pr_err("There's already a device attached to this domain.\n");
+	if (dom->sdev)
 		return -EINVAL;
-	}
 
 	dom->pgt_va = dma_alloc_coherent(sdev->dev, pgt_size, &dom->pgt_pa, GFP_KERNEL);
 	if (!dom->pgt_va)
drivers/iommu/tegra-gart.c
@@ -112,7 +112,7 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
 	spin_lock(&gart->dom_lock);
 
 	if (gart->active_domain && gart->active_domain != domain) {
-		ret = -EBUSY;
+		ret = -EINVAL;
 	} else if (dev_iommu_priv_get(dev) != domain) {
 		dev_iommu_priv_set(dev, domain);
 		gart->active_domain = domain;
drivers/iommu/virtio-iommu.c
@@ -734,8 +734,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		 */
 		ret = viommu_domain_finalise(vdev, domain);
 	} else if (vdomain->viommu != vdev->viommu) {
-		dev_err(dev, "cannot attach to foreign vIOMMU\n");
-		ret = -EXDEV;
+		ret = -EINVAL;
 	}
 	mutex_unlock(&vdomain->mutex);
 
Loading…
Reference in New Issue
Block a user