iommu: Rename ops->domain_alloc_user() to domain_alloc_paging_flags()

Now that the main domain allocating path is calling this function, it
doesn't make sense to leave it named _user. Change the name to
alloc_paging_flags() to mirror the new iommu_paging_domain_alloc_flags()
function.

A driver should implement only one of ops->domain_alloc_paging() or
ops->domain_alloc_paging_flags(). The former is a simpler interface with
less boilerplate that the majority of drivers use. The latter is for
drivers with a greater feature set (PASID, multiple page table support,
advanced iommufd support, nesting, etc). Additional patches will be needed
to achieve this.

Link: https://patch.msgid.link/r/2-v1-c252ebdeb57b+329-iommu_paging_flags_jgg@nvidia.com
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Jason Gunthorpe 2024-11-14 15:55:31 -04:00
parent 64214c2b95
commit d53764723e
7 changed files with 32 additions and 38 deletions
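
The contract described in the commit message (a driver implements exactly one
of domain_alloc_paging() or domain_alloc_paging_flags()) can be illustrated
with a minimal, hypothetical driver sketch. The foo_* names and the choice of
accepted flag below are invented for illustration and are not part of this
patch:

#include <linux/err.h>
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

static struct iommu_domain *
foo_domain_alloc_paging_flags(struct device *dev, u32 flags,
                              const struct iommu_user_data *user_data)
{
        /* Refuse anything this (imaginary) hardware cannot support. */
        if (flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
                return ERR_PTR(-EOPNOTSUPP);
        if (user_data)
                return ERR_PTR(-EOPNOTSUPP);

        /* ... allocate and return a __IOMMU_DOMAIN_PAGING domain here ... */
        return ERR_PTR(-ENOMEM);
}

static const struct iommu_ops foo_iommu_ops = {
        /* Implement exactly one of the two paging allocation ops. */
        .domain_alloc_paging_flags = foo_domain_alloc_paging_flags,
};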

drivers/iommu/amd/iommu.c

@@ -2407,9 +2407,8 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
 }
 
 static struct iommu_domain *
-amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
-                            struct iommu_domain *parent,
-                            const struct iommu_user_data *user_data)
+amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
+                                    const struct iommu_user_data *user_data)
 {
         unsigned int type = IOMMU_DOMAIN_UNMANAGED;
@@ -2420,7 +2419,7 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
         if (dev)
                 iommu = get_amd_iommu_from_dev(dev);
 
-        if ((flags & ~supported_flags) || parent || user_data)
+        if ((flags & ~supported_flags) || user_data)
                 return ERR_PTR(-EOPNOTSUPP);
 
         /* Allocate domain with v2 page table if IOMMU supports PASID. */
@@ -2884,7 +2883,7 @@ const struct iommu_ops amd_iommu_ops = {
         .release_domain = &release_domain,
         .identity_domain = &identity_domain.domain,
         .domain_alloc = amd_iommu_domain_alloc,
-        .domain_alloc_user = amd_iommu_domain_alloc_user,
+        .domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags,
         .domain_alloc_sva = amd_iommu_domain_alloc_sva,
         .probe_device = amd_iommu_probe_device,
         .release_device = amd_iommu_release_device,

drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c

@@ -3132,9 +3132,8 @@ static struct iommu_domain arm_smmu_blocked_domain = {
 };
 
 static struct iommu_domain *
-arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
-                           struct iommu_domain *parent,
-                           const struct iommu_user_data *user_data)
+arm_smmu_domain_alloc_paging_flags(struct device *dev, u32 flags,
+                                   const struct iommu_user_data *user_data)
 {
         struct arm_smmu_master *master = dev_iommu_priv_get(dev);
         const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
@@ -3145,7 +3144,7 @@ arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
 
         if (flags & ~PAGING_FLAGS)
                 return ERR_PTR(-EOPNOTSUPP);
-        if (parent || user_data)
+        if (user_data)
                 return ERR_PTR(-EOPNOTSUPP);
 
         if (flags & IOMMU_HWPT_ALLOC_PASID)
@@ -3546,7 +3545,7 @@ static struct iommu_ops arm_smmu_ops = {
         .hw_info = arm_smmu_hw_info,
         .domain_alloc_paging = arm_smmu_domain_alloc_paging,
         .domain_alloc_sva = arm_smmu_sva_domain_alloc,
-        .domain_alloc_user = arm_smmu_domain_alloc_user,
+        .domain_alloc_paging_flags = arm_smmu_domain_alloc_paging_flags,
         .probe_device = arm_smmu_probe_device,
         .release_device = arm_smmu_release_device,
         .device_group = arm_smmu_device_group,

drivers/iommu/intel/iommu.c

@@ -3328,9 +3328,8 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
 }
 
 static struct iommu_domain *
-intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
-                              struct iommu_domain *parent,
-                              const struct iommu_user_data *user_data)
+intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
+                                      const struct iommu_user_data *user_data)
 {
         struct device_domain_info *info = dev_iommu_priv_get(dev);
         bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
@@ -3340,9 +3339,6 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
         struct iommu_domain *domain;
         bool first_stage;
 
-        if (parent)
-                return ERR_PTR(-EOPNOTSUPP);
-
         if (flags &
             (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING
                | IOMMU_HWPT_FAULT_ID_VALID)))
@@ -4468,7 +4464,7 @@ const struct iommu_ops intel_iommu_ops = {
         .identity_domain = &identity_domain,
         .capable = intel_iommu_capable,
         .hw_info = intel_iommu_hw_info,
-        .domain_alloc_user = intel_iommu_domain_alloc_user,
+        .domain_alloc_paging_flags = intel_iommu_domain_alloc_paging_flags,
         .domain_alloc_sva = intel_svm_domain_alloc,
         .domain_alloc_paging = intel_iommu_domain_alloc_paging,
         .domain_alloc_nested = intel_iommu_domain_alloc_nested,

drivers/iommu/iommu.c

@@ -1987,8 +1987,8 @@ __iommu_paging_domain_alloc_flags(struct device *dev, unsigned int type,
         if (ops->domain_alloc_paging && !flags)
                 domain = ops->domain_alloc_paging(dev);
-        else if (ops->domain_alloc_user)
-                domain = ops->domain_alloc_user(dev, flags, NULL, NULL);
+        else if (ops->domain_alloc_paging_flags)
+                domain = ops->domain_alloc_paging_flags(dev, flags, NULL);
         else if (ops->domain_alloc && !flags)
                 domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
         else
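
For context, this is the dispatch that iommu_paging_domain_alloc_flags() (the
entry point named in the commit message) funnels into. A hedged caller-side
sketch follows; the foo_setup_domain() helper, the chosen flag and the error
handling are illustrative only:

#include <linux/err.h>
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

static int foo_setup_domain(struct device *dev)
{
        struct iommu_domain *domain;

        /*
         * A non-zero flags value bypasses the plain domain_alloc_paging()
         * branch above and reaches ops->domain_alloc_paging_flags().
         */
        domain = iommu_paging_domain_alloc_flags(dev,
                                                 IOMMU_HWPT_ALLOC_DIRTY_TRACKING);
        if (IS_ERR(domain))
                return PTR_ERR(domain);

        /* ... attach the domain to the device, map memory, etc. ... */
        iommu_domain_free(domain);
        return 0;
}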

drivers/iommu/iommufd/hw_pagetable.c

@@ -119,7 +119,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
 
         lockdep_assert_held(&ioas->mutex);
 
-        if ((flags || user_data) && !ops->domain_alloc_user)
+        if ((flags || user_data) && !ops->domain_alloc_paging_flags)
                 return ERR_PTR(-EOPNOTSUPP);
         if (flags & ~valid_flags)
                 return ERR_PTR(-EOPNOTSUPP);
@@ -139,9 +139,9 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
         hwpt_paging->ioas = ioas;
         hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
 
-        if (ops->domain_alloc_user) {
-                hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL,
-                                                      user_data);
+        if (ops->domain_alloc_paging_flags) {
+                hwpt->domain = ops->domain_alloc_paging_flags(idev->dev, flags,
+                                                              user_data);
                 if (IS_ERR(hwpt->domain)) {
                         rc = PTR_ERR(hwpt->domain);
                         hwpt->domain = NULL;

drivers/iommu/iommufd/selftest.c

@@ -379,9 +379,8 @@ mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
 }
 
 static struct iommu_domain *
-mock_domain_alloc_user(struct device *dev, u32 flags,
-                       struct iommu_domain *parent,
-                       const struct iommu_user_data *user_data)
+mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
+                               const struct iommu_user_data *user_data)
 {
         bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
         const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
@@ -390,9 +389,6 @@ mock_domain_alloc_user(struct device *dev, u32 flags,
                 MOCK_FLAGS_DEVICE_NO_DIRTY;
         struct iommu_domain *domain;
 
-        if (parent)
-                return ERR_PTR(-EOPNOTSUPP);
-
         if (user_data)
                 return ERR_PTR(-EOPNOTSUPP);
         if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
@@ -718,7 +714,7 @@ static const struct iommu_ops mock_ops = {
         .pgsize_bitmap = MOCK_IO_PAGE_SIZE,
         .hw_info = mock_domain_hw_info,
         .domain_alloc_paging = mock_domain_alloc_paging,
-        .domain_alloc_user = mock_domain_alloc_user,
+        .domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
         .domain_alloc_nested = mock_domain_alloc_nested,
         .capable = mock_domain_capable,
         .device_group = generic_device_group,

include/linux/iommu.h

@@ -557,13 +557,17 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
  * @domain_alloc: allocate and return an iommu domain if success. Otherwise
  *                NULL is returned. The domain is not fully initialized until
  *                the caller iommu_domain_alloc() returns.
- * @domain_alloc_user: Allocate an iommu domain corresponding to the input
- *                     parameters as defined in include/uapi/linux/iommufd.h.
- *                     The @user_data can be optionally provided, the
- *                     new domain must support __IOMMU_DOMAIN_PAGING.
- *                     Upon failure, ERR_PTR must be returned.
+ * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the
+ *                     input parameters as defined in
+ *                     include/uapi/linux/iommufd.h. The @user_data can be
+ *                     optionally provided, the new domain must support
+ *                     __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be
+ *                     returned.
  * @domain_alloc_paging: Allocate an iommu_domain that can be used for
- *                       UNMANAGED, DMA, and DMA_FQ domain types.
+ *                       UNMANAGED, DMA, and DMA_FQ domain types. This is the
+ *                       same as invoking domain_alloc_paging_flags() with
+ *                       @flags=0, @user_data=NULL. A driver should implement
+ *                       only one of the two ops.
  * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
  * @domain_alloc_nested: Allocate an iommu_domain for nested translation.
  * @probe_device: Add device to iommu driver handling
@@ -614,8 +618,8 @@ struct iommu_ops {
 
         /* Domain allocation and freeing by the iommu driver */
         struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
-        struct iommu_domain *(*domain_alloc_user)(
-                struct device *dev, u32 flags, struct iommu_domain *parent,
+        struct iommu_domain *(*domain_alloc_paging_flags)(
+                struct device *dev, u32 flags,
                 const struct iommu_user_data *user_data);
         struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
         struct iommu_domain *(*domain_alloc_sva)(struct device *dev,