Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
IOMMU Fixes for Linux v6.11-rc5

Merge tag 'iommu-fixes-v6.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux

Pull iommu fixes from Joerg Roedel:

 - Fix a device-stall problem in bad io-page-fault setups (faults
   received from devices with no supporting domain attached)

 - Context flush fix for Intel VT-d

 - Do not allow non-read+non-write mappings through iommufd, as most
   implementations cannot handle them

 - Fix a possible infinite-loop issue in the map_pages() path

 - Add Jean-Philippe as reviewer for SMMUv3 SVA support

* tag 'iommu-fixes-v6.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux:
  MAINTAINERS: Add Jean-Philippe as SMMUv3 SVA reviewer
  iommu: Do not return 0 from map_pages if it doesn't do anything
  iommufd: Do not allow creating areas without READ or WRITE
  iommu/vt-d: Fix incorrect domain ID in context flush helper
  iommu: Handle iommu faults for a bad iopf setup
commit 13c6bba601
MAINTAINERS

@@ -1880,6 +1880,10 @@ F: Documentation/devicetree/bindings/iommu/arm,smmu*
 F: drivers/iommu/arm/
 F: drivers/iommu/io-pgtable-arm*
 
+ARM SMMU SVA SUPPORT
+R: Jean-Philippe Brucker <jean-philippe@linaro.org>
+F: drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+
 ARM SUB-ARCHITECTURES
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

@@ -1777,7 +1777,7 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
 		goto out_unlock;
 	}
 
-	iommu_report_device_fault(master->dev, &fault_evt);
+	ret = iommu_report_device_fault(master->dev, &fault_evt);
 out_unlock:
 	mutex_unlock(&smmu->streams_mutex);
 	return ret;
drivers/iommu/intel/iommu.c

@@ -1944,6 +1944,7 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
 {
 	struct intel_iommu *iommu = info->iommu;
 	struct context_entry *context;
+	u16 did;
 
 	spin_lock(&iommu->lock);
 	context = iommu_context_addr(iommu, bus, devfn, 0);
@@ -1952,10 +1953,11 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
 		return;
 	}
 
+	did = context_domain_id(context);
 	context_clear_entry(context);
 	__iommu_flush_cache(iommu, context, sizeof(*context));
 	spin_unlock(&iommu->lock);
-	intel_context_flush_present(info, context, true);
+	intel_context_flush_present(info, context, did, true);
 }
 
 static int domain_setup_first_level(struct intel_iommu *iommu,
@@ -4249,6 +4251,7 @@ static int context_flip_pri(struct device_domain_info *info, bool enable)
 	struct intel_iommu *iommu = info->iommu;
 	u8 bus = info->bus, devfn = info->devfn;
 	struct context_entry *context;
+	u16 did;
 
 	spin_lock(&iommu->lock);
 	if (context_copied(iommu, bus, devfn)) {
@@ -4261,6 +4264,7 @@ static int context_flip_pri(struct device_domain_info *info, bool enable)
 		spin_unlock(&iommu->lock);
 		return -ENODEV;
 	}
+	did = context_domain_id(context);
 
 	if (enable)
 		context_set_sm_pre(context);
@@ -4269,7 +4273,7 @@ static int context_flip_pri(struct device_domain_info *info, bool enable)
 
 	if (!ecap_coherent(iommu->ecap))
 		clflush_cache_range(context, sizeof(*context));
-	intel_context_flush_present(info, context, true);
+	intel_context_flush_present(info, context, did, true);
 	spin_unlock(&iommu->lock);
 
 	return 0;
drivers/iommu/intel/iommu.h

@@ -1154,7 +1154,7 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
 
 void intel_context_flush_present(struct device_domain_info *info,
 				 struct context_entry *context,
-				 bool affect_domains);
+				 u16 did, bool affect_domains);
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
 void intel_svm_check(struct intel_iommu *iommu);
drivers/iommu/intel/pasid.c

@@ -683,6 +683,7 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
 	struct intel_iommu *iommu = info->iommu;
 	struct context_entry *context;
+	u16 did;
 
 	spin_lock(&iommu->lock);
 	context = iommu_context_addr(iommu, bus, devfn, false);
@@ -691,10 +692,11 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
 		return;
 	}
 
+	did = context_domain_id(context);
 	context_clear_entry(context);
 	__iommu_flush_cache(iommu, context, sizeof(*context));
 	spin_unlock(&iommu->lock);
-	intel_context_flush_present(info, context, false);
+	intel_context_flush_present(info, context, did, false);
 }
 
 static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
@@ -885,10 +887,9 @@ static void __context_flush_dev_iotlb(struct device_domain_info *info)
  */
 void intel_context_flush_present(struct device_domain_info *info,
 				 struct context_entry *context,
-				 bool flush_domains)
+				 u16 did, bool flush_domains)
 {
 	struct intel_iommu *iommu = info->iommu;
-	u16 did = context_domain_id(context);
 	struct pasid_entry *pte;
 	int i;
 
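
Note on the VT-d change above: the callers clear the context entry before calling the flush helper, so the domain ID has to be read while the entry is still valid and passed down; the old helper re-derived it from the already-cleared entry and flushed the wrong domain. A minimal illustrative sketch of the required ordering (helper name is hypothetical, locking omitted, not taken from the patch):

/*
 * Illustrative only: capture the DID while the context entry still holds it,
 * then hand it to the flush helper after the entry has been cleared.
 */
static void example_clear_and_flush(struct device_domain_info *info,
				    struct context_entry *context)
{
	struct intel_iommu *iommu = info->iommu;
	u16 did = context_domain_id(context);	/* entry still valid here */

	context_clear_entry(context);		/* DID in the entry is gone now */
	__iommu_flush_cache(iommu, context, sizeof(*context));

	/* Re-reading the DID from the cleared entry would target the wrong domain. */
	intel_context_flush_present(info, context, did, true);
}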
drivers/iommu/io-pgfault.c

@@ -115,6 +115,59 @@ static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
 	return group;
 }
 
+static struct iommu_attach_handle *find_fault_handler(struct device *dev,
+						       struct iopf_fault *evt)
+{
+	struct iommu_fault *fault = &evt->fault;
+	struct iommu_attach_handle *attach_handle;
+
+	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
+		attach_handle = iommu_attach_handle_get(dev->iommu_group,
+				fault->prm.pasid, 0);
+		if (IS_ERR(attach_handle)) {
+			const struct iommu_ops *ops = dev_iommu_ops(dev);
+
+			if (!ops->user_pasid_table)
+				return NULL;
+			/*
+			 * The iommu driver for this device supports user-
+			 * managed PASID table. Therefore page faults for
+			 * any PASID should go through the NESTING domain
+			 * attached to the device RID.
+			 */
+			attach_handle = iommu_attach_handle_get(
+					dev->iommu_group, IOMMU_NO_PASID,
+					IOMMU_DOMAIN_NESTED);
+			if (IS_ERR(attach_handle))
+				return NULL;
+		}
+	} else {
+		attach_handle = iommu_attach_handle_get(dev->iommu_group,
+				IOMMU_NO_PASID, 0);
+
+		if (IS_ERR(attach_handle))
+			return NULL;
+	}
+
+	if (!attach_handle->domain->iopf_handler)
+		return NULL;
+
+	return attach_handle;
+}
+
+static void iopf_error_response(struct device *dev, struct iopf_fault *evt)
+{
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
+	struct iommu_fault *fault = &evt->fault;
+	struct iommu_page_response resp = {
+		.pasid = fault->prm.pasid,
+		.grpid = fault->prm.grpid,
+		.code = IOMMU_PAGE_RESP_INVALID
+	};
+
+	ops->page_response(dev, evt, &resp);
+}
+
 /**
  * iommu_report_device_fault() - Report fault event to device driver
  * @dev: the device
@@ -153,24 +206,39 @@ static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
  * handling framework should guarantee that the iommu domain could only be
  * freed after the device has stopped generating page faults (or the iommu
  * hardware has been set to block the page faults) and the pending page faults
- * have been flushed.
+ * have been flushed. In case no page fault handler is attached or no iopf params
+ * are setup, then the ops->page_response() is called to complete the evt.
+ *
+ * Returns 0 on success, or an error in case of a bad/failed iopf setup.
  */
-void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
+int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 {
+	struct iommu_attach_handle *attach_handle;
 	struct iommu_fault *fault = &evt->fault;
 	struct iommu_fault_param *iopf_param;
 	struct iopf_group abort_group = {};
 	struct iopf_group *group;
 
+	attach_handle = find_fault_handler(dev, evt);
+	if (!attach_handle)
+		goto err_bad_iopf;
+
+	/*
+	 * Something has gone wrong if a fault capable domain is attached but no
+	 * iopf_param is setup
+	 */
 	iopf_param = iopf_get_dev_fault_param(dev);
 	if (WARN_ON(!iopf_param))
-		return;
+		goto err_bad_iopf;
 
 	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
-		report_partial_fault(iopf_param, fault);
+		int ret;
+
+		ret = report_partial_fault(iopf_param, fault);
 		iopf_put_dev_fault_param(iopf_param);
 		/* A request that is not the last does not need to be ack'd */
-		return;
+
+		return ret;
 	}
 
 	/*
@@ -185,38 +253,7 @@ void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 	if (group == &abort_group)
 		goto err_abort;
 
-	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
-		group->attach_handle = iommu_attach_handle_get(dev->iommu_group,
-							       fault->prm.pasid,
-							       0);
-		if (IS_ERR(group->attach_handle)) {
-			const struct iommu_ops *ops = dev_iommu_ops(dev);
-
-			if (!ops->user_pasid_table)
-				goto err_abort;
-
-			/*
-			 * The iommu driver for this device supports user-
-			 * managed PASID table. Therefore page faults for
-			 * any PASID should go through the NESTING domain
-			 * attached to the device RID.
-			 */
-			group->attach_handle =
-				iommu_attach_handle_get(dev->iommu_group,
-							IOMMU_NO_PASID,
-							IOMMU_DOMAIN_NESTED);
-			if (IS_ERR(group->attach_handle))
-				goto err_abort;
-		}
-	} else {
-		group->attach_handle =
-			iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0);
-		if (IS_ERR(group->attach_handle))
-			goto err_abort;
-	}
-
-	if (!group->attach_handle->domain->iopf_handler)
-		goto err_abort;
+	group->attach_handle = attach_handle;
 
 	/*
 	 * On success iopf_handler must call iopf_group_response() and
@@ -225,7 +262,7 @@ void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 	if (group->attach_handle->domain->iopf_handler(group))
 		goto err_abort;
 
-	return;
+	return 0;
 
 err_abort:
 	dev_warn_ratelimited(dev, "iopf with pasid %d aborted\n",
@@ -235,6 +272,14 @@ void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 		__iopf_free_group(group);
 	else
 		iopf_free_group(group);
+
+	return 0;
+
+err_bad_iopf:
+	if (fault->type == IOMMU_FAULT_PAGE_REQ)
+		iopf_error_response(dev, evt);
+
+	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(iommu_report_device_fault);
 
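
With iommu_report_device_fault() now returning an int, callers can propagate delivery failures instead of silently dropping the fault, which is exactly what the SMMUv3 event-handler change above does. A minimal driver-side sketch, assuming only the signatures visible in this diff (the function name is hypothetical):

#include <linux/device.h>
#include <linux/iommu.h>

static int example_handle_page_request(struct device *dev,
					struct iopf_fault *evt)
{
	int ret;

	ret = iommu_report_device_fault(dev, evt);
	if (ret)
		/* Bad iopf setup: the core already completed the request. */
		dev_dbg(dev, "page request not delivered: %d\n", ret);

	return ret;
}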
drivers/iommu/io-pgtable-arm-v7s.c

@@ -552,9 +552,8 @@ static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 			    paddr >= (1ULL << data->iop.cfg.oas)))
 		return -ERANGE;
 
-	/* If no access, then nothing to do */
 	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
-		return 0;
+		return -EINVAL;
 
 	while (pgcount--) {
 		ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,
drivers/iommu/io-pgtable-arm.c

@@ -515,9 +515,8 @@ static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 	if (WARN_ON(iaext || paddr >> cfg->oas))
 		return -ERANGE;
 
-	/* If no access, then nothing to do */
 	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
-		return 0;
+		return -EINVAL;
 
 	prot = arm_lpae_prot_to_pte(data, iommu_prot);
 	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
drivers/iommu/io-pgtable-dart.c

@@ -245,9 +245,8 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 	if (WARN_ON(paddr >> cfg->oas))
 		return -ERANGE;
 
-	/* If no access, then nothing to do */
 	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
-		return 0;
+		return -EINVAL;
 
 	tbl = dart_get_table(data, iova);
 
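
The map_pages() change in the three page-table formats above matters because callers typically loop and advance by the number of bytes actually mapped; a call that reports success while mapping nothing lets such a loop spin forever. An illustrative sketch of that caller pattern, assuming only the io_pgtable_ops map_pages() signature (the helper name is hypothetical, not from the patch):

#include <linux/gfp.h>
#include <linux/io-pgtable.h>
#include <linux/sizes.h>

static int example_map_range(struct io_pgtable_ops *ops, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	while (size) {
		size_t mapped = 0;
		int ret;

		ret = ops->map_pages(ops, iova, paddr, SZ_4K, size / SZ_4K,
				     prot, GFP_KERNEL, &mapped);
		if (ret)
			return ret;
		if (!mapped)	/* "success" with no progress would loop forever */
			return -EINVAL;

		iova += mapped;
		paddr += mapped;
		size -= mapped;
	}

	return 0;
}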
drivers/iommu/iommufd/ioas.c

@@ -213,6 +213,10 @@ int iommufd_ioas_map(struct iommufd_ucmd *ucmd)
 	if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX)
 		return -EOVERFLOW;
 
+	if (!(cmd->flags &
+	      (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
+		return -EINVAL;
+
 	ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id);
 	if (IS_ERR(ioas))
 		return PTR_ERR(ioas);
@@ -253,6 +257,10 @@ int iommufd_ioas_copy(struct iommufd_ucmd *ucmd)
 	    cmd->dst_iova >= ULONG_MAX)
 		return -EOVERFLOW;
 
+	if (!(cmd->flags &
+	      (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE)))
+		return -EINVAL;
+
 	src_ioas = iommufd_get_ioas(ucmd->ictx, cmd->src_ioas_id);
 	if (IS_ERR(src_ioas))
 		return PTR_ERR(src_ioas);
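
For userspace, the effect of the ioas.c checks is that an IOAS map or copy request must ask for at least read or write access; requests without either flag now fail with -EINVAL instead of creating an area most IOMMU implementations cannot install. A minimal usage sketch, assuming the iommufd uAPI structures and ioctl from <linux/iommufd.h>:

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int example_ioas_map(int iommufd, uint32_t ioas_id, void *buf,
			    size_t length, uint64_t iova)
{
	struct iommu_ioas_map cmd = {
		.size = sizeof(cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA |
			 IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
		.ioas_id = ioas_id,
		.user_va = (uintptr_t)buf,
		.length = length,
		.iova = iova,
	};

	/* Omitting both READABLE and WRITEABLE now returns -EINVAL. */
	return ioctl(iommufd, IOMMU_IOAS_MAP, &cmd);
}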
include/linux/iommu.h

@@ -1563,7 +1563,7 @@ struct iopf_queue *iopf_queue_alloc(const char *name);
 void iopf_queue_free(struct iopf_queue *queue);
 int iopf_queue_discard_partial(struct iopf_queue *queue);
 void iopf_free_group(struct iopf_group *group);
-void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
+int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
 void iopf_group_response(struct iopf_group *group,
 			 enum iommu_page_response_code status);
 #else
@@ -1601,9 +1601,10 @@ static inline void iopf_free_group(struct iopf_group *group)
 {
 }
 
-static inline void
+static inline int
 iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 {
+	return -ENODEV;
 }
 
 static inline void iopf_group_response(struct iopf_group *group,
tools/testing/selftests/iommu/iommufd.c

@@ -825,7 +825,7 @@ TEST_F(iommufd_ioas, copy_area)
 {
 	struct iommu_ioas_copy copy_cmd = {
 		.size = sizeof(copy_cmd),
-		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
+		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
 		.dst_ioas_id = self->ioas_id,
 		.src_ioas_id = self->ioas_id,
 		.length = PAGE_SIZE,
@@ -1318,7 +1318,7 @@ TEST_F(iommufd_ioas, copy_sweep)
 {
 	struct iommu_ioas_copy copy_cmd = {
 		.size = sizeof(copy_cmd),
-		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
+		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
 		.src_ioas_id = self->ioas_id,
 		.dst_iova = MOCK_APERTURE_START,
 		.length = MOCK_PAGE_SIZE,
@@ -1608,7 +1608,7 @@ TEST_F(iommufd_mock_domain, user_copy)
 	};
 	struct iommu_ioas_copy copy_cmd = {
 		.size = sizeof(copy_cmd),
-		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
+		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
 		.dst_ioas_id = self->ioas_id,
 		.dst_iova = MOCK_APERTURE_START,
 		.length = BUFFER_SIZE,