// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice-versa, and
 * also providing a mechanism to have contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
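
/*
 * Illustrative example (hypothetical frame numbers, not taken from any real
 * system): a guest buffer spanning PFN 0x1000 and PFN 0x1001 might be backed
 * by MFN 0x8642 and MFN 0x31f0.  The buffer is contiguous in guest-physical
 * space but not in machine (bus) space, so a device given the machine address
 * of the first page and told the buffer is two pages long would scribble past
 * MFN 0x8642 instead of reaching MFN 0x31f0.  This file handles such buffers
 * either by exchanging them for machine-contiguous memory
 * (xen_create_contiguous_region()) or by bouncing them through the swiotlb.
 */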

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>

#include <trace/events/swiotlb.h>

#define MAX_DMA_BITS 32

/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
        unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
        phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

        baddr |= paddr & ~XEN_PAGE_MASK;
        return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
                                          phys_addr_t baddr)
{
        unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
        phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
                            (baddr & ~XEN_PAGE_MASK);

        return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
                                          dma_addr_t dma_addr)
{
        return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}
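
/*
 * Check whether a buffer that is contiguous in guest-physical space is also
 * contiguous (and suitably aligned) in machine/bus space.  A non-zero return
 * means the buffer cannot be handed to the device as-is and must either be
 * exchanged for machine-contiguous memory or bounced through the swiotlb.
 */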
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
        unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
        phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);

        next_bfn = pfn_to_bfn(xen_pfn);

        /* If buffer is physically aligned, ensure DMA alignment. */
        if (IS_ALIGNED(p, algn) &&
            !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
                return 1;

        for (i = 1; i < nr_pages; i++)
                if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
                        return 1;

        return 0;
}

static struct io_tlb_pool *xen_swiotlb_find_pool(struct device *dev,
                                                 dma_addr_t dma_addr)
{
        unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
        unsigned long xen_pfn = bfn_to_local_pfn(bfn);
        phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check address within our domain.
         */
        if (pfn_valid(PFN_DOWN(paddr)))
                return swiotlb_find_pool(dev, paddr);
        return NULL;
}

#ifdef CONFIG_X86
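/*
 * Exchange the pages backing the early swiotlb buffer for machine-contiguous
 * memory, one IO_TLB_SEGSIZE segment at a time.  Each segment is requested
 * with a tight machine-address restriction first; on failure the restriction
 * is relaxed bit by bit, up to MAX_DMA_BITS, until the hypervisor can satisfy
 * the exchange.
 */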
int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
        int rc;
        unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
        unsigned int i, dma_bits = order + PAGE_SHIFT;
        dma_addr_t dma_handle;
        phys_addr_t p = virt_to_phys(buf);

        BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
        BUG_ON(nslabs % IO_TLB_SEGSIZE);

        i = 0;
        do {
                do {
                        rc = xen_create_contiguous_region(
                                p + (i << IO_TLB_SHIFT), order,
                                dma_bits, &dma_handle);
                } while (rc && dma_bits++ < MAX_DMA_BITS);
                if (rc)
                        return rc;

                i += IO_TLB_SEGSIZE;
        } while (i < nslabs);
        return 0;
}
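
/*
 * Coherent allocations are served from the normal page allocator.  If the
 * resulting pages already satisfy the device's coherent DMA mask and are
 * machine-contiguous, they are used directly; otherwise they are exchanged
 * for a machine-contiguous region below the mask and flagged as Xen-remapped
 * so that xen_swiotlb_free_coherent() knows to undo the exchange.
 */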
static void *
xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
        u64 dma_mask = dev->coherent_dma_mask;
        int order = get_order(size);
        phys_addr_t phys;
        void *ret;

        /* Align the allocation to the Xen page size */
        size = 1UL << (order + XEN_PAGE_SHIFT);

        ret = (void *)__get_free_pages(flags, get_order(size));
        if (!ret)
                return ret;
        phys = virt_to_phys(ret);

        *dma_handle = xen_phys_to_dma(dev, phys);
        if (*dma_handle + size - 1 > dma_mask ||
            range_straddles_page_boundary(phys, size)) {
                if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
                                dma_handle) != 0)
                        goto out_free_pages;
                SetPageXenRemapped(virt_to_page(ret));
        }

        memset(ret, 0, size);
        return ret;

out_free_pages:
        free_pages((unsigned long)ret, get_order(size));
        return NULL;
}

static void
xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        phys_addr_t phys = virt_to_phys(vaddr);
        int order = get_order(size);

        /* Convert the size to actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);

        if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
            WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
                return;

        if (TestClearPageXenRemapped(virt_to_page(vaddr)))
                xen_destroy_contiguous_region(phys, order);
        free_pages((unsigned long)vaddr, get_order(size));
}
#endif /* CONFIG_X86 */

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_sync_single_for_cpu is
 * performed.
 */
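/*
 * This is the ->map_page callback of xen_swiotlb_dma_ops below; it is
 * normally reached through the generic DMA API (dma_map_page() or
 * dma_map_single()) on devices that use these ops.
 */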
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                       unsigned long offset, size_t size,
                                       enum dma_data_direction dir,
                                       unsigned long attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size, true) &&
            !range_straddles_page_boundary(phys, size) &&
            !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
            !is_swiotlb_force_bounce(dev))
                goto done;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        trace_swiotlb_bounced(dev, dev_addr, size);

        map = swiotlb_tbl_map_single(dev, phys, size, 0, dir, attrs);
        if (map == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;

        phys = map;
        dev_addr = xen_phys_to_dma(dev, map);

        /*
         * Ensure that the address returned is DMA'ble
         */
        if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
                __swiotlb_tbl_unmap_single(dev, map, size, dir,
                                attrs | DMA_ATTR_SKIP_CPU_SYNC,
                                swiotlb_find_pool(dev, map));
                return DMA_MAPPING_ERROR;
        }

done:
        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
                        arch_sync_dma_for_device(phys, size, dir);
                else
                        xen_dma_sync_for_device(dev, dev_addr, size, dir);
        }
        return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
        struct io_tlb_pool *pool;

        BUG_ON(dir == DMA_NONE);

        if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
                        arch_sync_dma_for_cpu(paddr, size, dir);
                else
                        xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
        }

        /* NOTE: We use dev_addr here, not paddr! */
        pool = xen_swiotlb_find_pool(hwdev, dev_addr);
        if (pool)
                __swiotlb_tbl_unmap_single(hwdev, paddr, size, dir,
                                           attrs, pool);
}

static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
        struct io_tlb_pool *pool;

        if (!dev_is_dma_coherent(dev)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
                        arch_sync_dma_for_cpu(paddr, size, dir);
                else
                        xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
        }

        pool = xen_swiotlb_find_pool(dev, dma_addr);
        if (pool)
                __swiotlb_sync_single_for_cpu(dev, paddr, size, dir, pool);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
        struct io_tlb_pool *pool;

        pool = xen_swiotlb_find_pool(dev, dma_addr);
        if (pool)
                __swiotlb_sync_single_for_device(dev, paddr, size, dir, pool);

        if (!dev_is_dma_coherent(dev)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
                        arch_sync_dma_for_device(paddr, size, dir);
                else
                        xen_dma_sync_for_device(dev, dma_addr, size, dir);
        }
}

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
                                dir, attrs);
}

static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
                                sg->offset, sg->length, dir, attrs);
                if (sg->dma_address == DMA_MAPPING_ERROR)
                        goto out_unmap;
                sg_dma_len(sg) = sg->length;
        }

        return nelems;
out_unmap:
        xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        sg_dma_len(sgl) = 0;
        return -EIO;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
                int nelems, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i) {
                xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
                                sg->length, dir);
        }
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                int nelems, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i) {
                xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
                                sg->length, dir);
        }
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_phys_to_dma(hwdev, default_swiotlb_limit()) <= mask;
}
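
/*
 * The DMA operations exported to the rest of the kernel.  Architecture code
 * outside this file decides which devices get these ops, typically those
 * that need Xen's bounce buffering (e.g. PCI passthrough into a PV domain).
 */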
const struct dma_map_ops xen_swiotlb_dma_ops = {
#ifdef CONFIG_X86
        .alloc = xen_swiotlb_alloc_coherent,
        .free = xen_swiotlb_free_coherent,
#else
        .alloc = dma_direct_alloc,
        .free = dma_direct_free,
#endif
        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
        .sync_single_for_device = xen_swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
        .map_sg = xen_swiotlb_map_sg,
        .unmap_sg = xen_swiotlb_unmap_sg,
        .map_page = xen_swiotlb_map_page,
        .unmap_page = xen_swiotlb_unmap_page,
        .dma_supported = xen_swiotlb_dma_supported,
        .mmap = dma_common_mmap,
        .get_sgtable = dma_common_get_sgtable,
        .alloc_pages_op = dma_common_alloc_pages,
        .free_pages = dma_common_free_pages,
        .max_mapping_size = swiotlb_max_mapping_size,
};