dma-iommu: use static-key to minimize the impact in the fast-path
Move the is_kdump_kernel() check out of iommu_dma_deferred_attach() and into iommu_dma_init(), and use a static key in the fast path to minimize the impact in the normal (non-kdump) case.

Co-developed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Lianbo Jiang <lijiang@redhat.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/20210126115337.20068-2-lijiang@redhat.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit a8e8af35c9
parent 262948f8ba
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -51,6 +51,8 @@ struct iommu_dma_cookie {
 	struct iommu_domain		*fq_domain;
 };
 
+static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
+
 void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
 		struct iommu_domain *domain)
 {
@@ -383,9 +385,6 @@ static int iommu_dma_deferred_attach(struct device *dev,
 {
 	const struct iommu_ops *ops = domain->ops;
 
-	if (!is_kdump_kernel())
-		return 0;
-
 	if (unlikely(ops->is_attach_deferred &&
 			ops->is_attach_deferred(domain, dev)))
 		return iommu_attach_device(domain, dev);
@@ -535,7 +534,8 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	size_t iova_off = iova_offset(iovad, phys);
 	dma_addr_t iova;
 
-	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+	    iommu_dma_deferred_attach(dev, domain))
 		return DMA_MAPPING_ERROR;
 
 	size = iova_align(iovad, size + iova_off);
@@ -693,7 +693,8 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 
 	*dma_handle = DMA_MAPPING_ERROR;
 
-	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+	    iommu_dma_deferred_attach(dev, domain))
 		return NULL;
 
 	min_size = alloc_sizes & -alloc_sizes;
@@ -976,7 +977,8 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	unsigned long mask = dma_get_seg_boundary(dev);
 	int i;
 
-	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
+	    iommu_dma_deferred_attach(dev, domain))
 		return 0;
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
@@ -1424,6 +1426,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
 
 static int iommu_dma_init(void)
 {
+	if (is_kdump_kernel())
+		static_branch_enable(&iommu_deferred_attach_enabled);
+
 	return iova_cache_get();
 }
 arch_initcall(iommu_dma_init);
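For context, the patch relies on the kernel's jump-label mechanism: a static key declared with DEFINE_STATIC_KEY_FALSE compiles the fast-path test down to a patched no-op branch, and only flipping the key at init time (here, when is_kdump_kernel() is true) makes the slow path reachable. Below is a minimal sketch of that pattern, separate from the actual dma-iommu code; the example_* names and the -EIO return are hypothetical and exist only for illustration.

#include <linux/jump_label.h>	/* DEFINE_STATIC_KEY_FALSE, static_branch_*() */
#include <linux/crash_dump.h>	/* is_kdump_kernel() */
#include <linux/errno.h>	/* error codes */
#include <linux/init.h>		/* arch_initcall() */

/* Key starts false: the fast path falls straight through until enabled. */
static DEFINE_STATIC_KEY_FALSE(example_slow_path_enabled);

/* Hypothetical slow-path helper, only reached once the key is enabled. */
static int example_slow_path(void)
{
	/* ...rarely needed, possibly expensive work... */
	return 0;
}

/* Hot path: with the key disabled, the check costs a single no-op branch. */
static int example_fast_path(void)
{
	if (static_branch_unlikely(&example_slow_path_enabled) &&
	    example_slow_path())
		return -EIO;

	/* ...normal fast-path work... */
	return 0;
}

/* One-time decision at boot, mirroring what iommu_dma_init() does. */
static int __init example_init(void)
{
	if (is_kdump_kernel())
		static_branch_enable(&example_slow_path_enabled);

	return 0;
}
arch_initcall(example_init);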