Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2024-12-29 17:23:36 +00:00)
dma-mapping updates for Linux 6.6
Merge tag 'dma-mapping-6.6-2023-08-29' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - allow dynamic sizing of the swiotlb buffer, to cater for secure
   virtualization workloads that require all I/O to be bounce buffered
   (Petr Tesarik)

 - move a declaration to a header (Arnd Bergmann)

 - check for memory region overlap in dma-contiguous (Binglei Wang)

 - remove the somewhat dangerous runtime swiotlb-xen enablement and
   unexport is_swiotlb_active (Christoph Hellwig, Juergen Gross)

 - per-node CMA improvements (Yajun Deng)

* tag 'dma-mapping-6.6-2023-08-29' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: optimize get_max_slots()
  swiotlb: move slot allocation explanation comment where it belongs
  swiotlb: search the software IO TLB only if the device makes use of it
  swiotlb: allocate a new memory pool when existing pools are full
  swiotlb: determine potential physical address limit
  swiotlb: if swiotlb is full, fall back to a transient memory pool
  swiotlb: add a flag whether SWIOTLB is allowed to grow
  swiotlb: separate memory pool data from other allocator data
  swiotlb: add documentation and rename swiotlb_do_find_slots()
  swiotlb: make io_tlb_default_mem local to swiotlb.c
  swiotlb: bail out of swiotlb_init_late() if swiotlb is already allocated
  dma-contiguous: check for memory region overlap
  dma-contiguous: support numa CMA for specified node
  dma-contiguous: support per-numa CMA for all architectures
  dma-mapping: move arch_dma_set_mask() declaration to header
  swiotlb: unexport is_swiotlb_active
  x86: always initialize xen-swiotlb when xen-pcifront is enabling
  xen/pci: add flag for PCI passthrough being possible
commit 6c1b980a7e
@@ -696,7 +696,7 @@
 			kernel/dma/contiguous.c

 	cma_pernuma=nn[MG]
-			[ARM64,KNL,CMA]
+			[KNL,CMA]
 			Sets the size of kernel per-numa memory area for
 			contiguous memory allocations. A value of 0 disables
 			per-numa CMA altogether. And If this option is not
@@ -706,6 +706,17 @@
 			which is located in node nid, if the allocation fails,
 			they will fallback to the global default memory area.

+	numa_cma=<node>:nn[MG][,<node>:nn[MG]]
+			[KNL,CMA]
+			Sets the size of kernel numa memory area for
+			contiguous memory allocations. It will reserve CMA
+			area for the specified node.
+
+			With numa CMA enabled, DMA users on node nid will
+			first try to allocate buffer from the numa area
+			which is located in node nid, if the allocation fails,
+			they will fallback to the global default memory area.
+
 	cmo_free_hint=	[PPC] Format: { yes | no }
 			Specify whether pages are marked as being inactive
 			when they are freed. This is used in CMO environments
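Usage illustration of the two boot options documented above (not part of the
patch; the node IDs and sizes are invented examples): on a two-node machine
the options could be combined on the kernel command line as

	cma_pernuma=16M numa_cma=0:64M,1:128M

which would reserve a 16M area on every node plus 64M on node 0 and 128M on
node 1; a DMA user on node 1 is then served from its local area first and
falls back to the default CMA area if that allocation fails, as described
above.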
@ -125,12 +125,10 @@ static int __init xen_mm_init(void)
|
||||
return 0;
|
||||
|
||||
/* we can work with the default swiotlb */
|
||||
if (!io_tlb_default_mem.nslabs) {
|
||||
rc = swiotlb_init_late(swiotlb_size_or_default(),
|
||||
xen_swiotlb_gfp(), NULL);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
}
|
||||
rc = swiotlb_init_late(swiotlb_size_or_default(),
|
||||
xen_swiotlb_gfp(), NULL);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
|
||||
cflush.op = 0;
|
||||
cflush.a.dev_bus_addr = 0;
|
||||
|
@ -461,8 +461,6 @@ void __init bootmem_init(void)
|
||||
arm64_hugetlb_cma_reserve();
|
||||
#endif
|
||||
|
||||
dma_pernuma_cma_reserve();
|
||||
|
||||
kvm_hyp_reserve();
|
||||
|
||||
/*
|
||||
|
@ -664,7 +664,7 @@ static int __init octeon_pci_setup(void)
|
||||
|
||||
/* BAR1 movable regions contiguous to cover the swiotlb */
|
||||
octeon_bar1_pci_phys =
|
||||
io_tlb_default_mem.start & ~((1ull << 22) - 1);
|
||||
default_swiotlb_base() & ~((1ull << 22) - 1);
|
||||
|
||||
for (index = 0; index < 32; index++) {
|
||||
union cvmx_pci_bar1_indexx bar1_index;
|
||||
|
@ -1,6 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/export.h>
|
||||
#include <asm/machdep.h>
|
||||
|
||||
|
@ -2,12 +2,6 @@
|
||||
#ifndef _ASM_X86_SWIOTLB_XEN_H
|
||||
#define _ASM_X86_SWIOTLB_XEN_H
|
||||
|
||||
#ifdef CONFIG_SWIOTLB_XEN
|
||||
extern int pci_xen_swiotlb_init_late(void);
|
||||
#else
|
||||
static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
|
||||
#endif
|
||||
|
||||
int xen_swiotlb_fixup(void *buf, unsigned long nslabs);
|
||||
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
|
||||
unsigned int address_bits,
|
||||
|
@ -72,9 +72,15 @@ static inline void __init pci_swiotlb_detect(void)
|
||||
#endif /* CONFIG_SWIOTLB */
|
||||
|
||||
#ifdef CONFIG_SWIOTLB_XEN
|
||||
static bool xen_swiotlb_enabled(void)
|
||||
{
|
||||
return xen_initial_domain() || x86_swiotlb_enable ||
|
||||
(IS_ENABLED(CONFIG_XEN_PCIDEV_FRONTEND) && xen_pv_pci_possible);
|
||||
}
|
||||
|
||||
static void __init pci_xen_swiotlb_init(void)
|
||||
{
|
||||
if (!xen_initial_domain() && !x86_swiotlb_enable)
|
||||
if (!xen_swiotlb_enabled())
|
||||
return;
|
||||
x86_swiotlb_enable = true;
|
||||
x86_swiotlb_flags |= SWIOTLB_ANY;
|
||||
@ -83,27 +89,6 @@ static void __init pci_xen_swiotlb_init(void)
|
||||
if (IS_ENABLED(CONFIG_PCI))
|
||||
pci_request_acs();
|
||||
}
|
||||
|
||||
int pci_xen_swiotlb_init_late(void)
|
||||
{
|
||||
if (dma_ops == &xen_swiotlb_dma_ops)
|
||||
return 0;
|
||||
|
||||
/* we can work with the default swiotlb */
|
||||
if (!io_tlb_default_mem.nslabs) {
|
||||
int rc = swiotlb_init_late(swiotlb_size_or_default(),
|
||||
GFP_KERNEL, xen_swiotlb_fixup);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* XXX: this switches the dma ops under live devices! */
|
||||
dma_ops = &xen_swiotlb_dma_ops;
|
||||
if (IS_ENABLED(CONFIG_PCI))
|
||||
pci_request_acs();
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pci_xen_swiotlb_init_late);
|
||||
#else
|
||||
static inline void __init pci_xen_swiotlb_init(void)
|
||||
{
|
||||
|
@ -44,6 +44,9 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
|
||||
/* Number of pages released from the initial allocation. */
|
||||
unsigned long xen_released_pages;
|
||||
|
||||
/* Memory map would allow PCI passthrough. */
|
||||
bool xen_pv_pci_possible;
|
||||
|
||||
/* E820 map used during setting up memory. */
|
||||
static struct e820_table xen_e820_table __initdata;
|
||||
|
||||
@ -814,6 +817,9 @@ char * __init xen_memory_setup(void)
|
||||
chunk_size = size;
|
||||
type = xen_e820_table.entries[i].type;
|
||||
|
||||
if (type == E820_TYPE_RESERVED)
|
||||
xen_pv_pci_possible = true;
|
||||
|
||||
if (type == E820_TYPE_RAM) {
|
||||
if (addr < mem_end) {
|
||||
chunk_size = min(size, mem_end - addr);
|
||||
|
@ -3108,9 +3108,7 @@ void device_initialize(struct device *dev)
|
||||
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
|
||||
dev->dma_coherent = dma_default_coherent;
|
||||
#endif
|
||||
#ifdef CONFIG_SWIOTLB
|
||||
dev->dma_io_tlb_mem = &io_tlb_default_mem;
|
||||
#endif
|
||||
swiotlb_dev_init(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(device_initialize);
|
||||
|
||||
|
@ -22,7 +22,6 @@
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/ktime.h>
|
||||
#include <linux/swiotlb.h>
|
||||
#include <xen/platform_pci.h>
|
||||
|
||||
#include <asm/xen/swiotlb-xen.h>
|
||||
@ -669,11 +668,6 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)
|
||||
|
||||
spin_unlock(&pcifront_dev_lock);
|
||||
|
||||
if (!err && !is_swiotlb_active(&pdev->xdev->dev)) {
|
||||
err = pci_xen_swiotlb_init_late();
|
||||
if (err)
|
||||
dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -381,7 +381,7 @@ xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
|
||||
static int
|
||||
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
|
||||
{
|
||||
return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask;
|
||||
return xen_phys_to_dma(hwdev, default_swiotlb_limit()) <= mask;
|
||||
}
|
||||
|
||||
const struct dma_map_ops xen_swiotlb_dma_ops = {
|
||||
|
@@ -625,7 +625,10 @@ struct device_physical_location {
  * @dma_pools: Dma pools (if dma'ble device).
  * @dma_mem: Internal for coherent mem override.
  * @cma_area: Contiguous memory area for dma allocations
- * @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use.
+ * @dma_io_tlb_mem: Software IO TLB allocator. Not for driver use.
+ * @dma_io_tlb_pools: List of transient swiotlb memory pools.
+ * @dma_io_tlb_lock: Protects changes to the list of active pools.
+ * @dma_uses_io_tlb: %true if device has used the software IO TLB.
  * @archdata: For arch-specific additions.
  * @of_node: Associated device tree node.
  * @fwnode: Associated device node supplied by platform firmware.
@@ -731,6 +734,11 @@
 #endif
 #ifdef CONFIG_SWIOTLB
 	struct io_tlb_mem *dma_io_tlb_mem;
 #endif
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+	struct list_head dma_io_tlb_pools;
+	spinlock_t dma_io_tlb_lock;
+	bool dma_uses_io_tlb;
+#endif
 	/* arch specific additions */
 	struct dev_archdata archdata;
@ -169,12 +169,6 @@ static inline void dma_free_contiguous(struct device *dev, struct page *page,
|
||||
}
|
||||
#endif /* CONFIG_DMA_CMA*/
|
||||
|
||||
#ifdef CONFIG_DMA_PERNUMA_CMA
|
||||
void dma_pernuma_cma_reserve(void);
|
||||
#else
|
||||
static inline void dma_pernuma_cma_reserve(void) { }
|
||||
#endif /* CONFIG_DMA_PERNUMA_CMA */
|
||||
|
||||
#ifdef CONFIG_DMA_DECLARE_COHERENT
|
||||
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
|
||||
dma_addr_t device_addr, size_t size);
|
||||
@ -343,6 +337,12 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t dma_addr, unsigned long attrs);
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
|
||||
void arch_dma_set_mask(struct device *dev, u64 mask);
|
||||
#else
|
||||
#define arch_dma_set_mask(dev, mask) do { } while (0)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
/*
|
||||
* Page protection so that devices that can't snoop CPU caches can use the
|
||||
|
@ -418,6 +418,8 @@ static inline void dma_sync_sgtable_for_device(struct device *dev,
|
||||
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
|
||||
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
|
||||
|
||||
bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
|
||||
|
||||
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, gfp_t gfp)
|
||||
{
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <linux/types.h>
|
||||
#include <linux/limits.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
struct device;
|
||||
struct page;
|
||||
@ -62,8 +63,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
|
||||
#ifdef CONFIG_SWIOTLB
|
||||
|
||||
/**
|
||||
* struct io_tlb_mem - IO TLB Memory Pool Descriptor
|
||||
*
|
||||
* struct io_tlb_pool - IO TLB memory pool descriptor
|
||||
* @start: The start address of the swiotlb memory pool. Used to do a quick
|
||||
* range check to see if the memory was in fact allocated by this
|
||||
* API.
|
||||
@ -73,19 +73,48 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
|
||||
* @vaddr: The vaddr of the swiotlb memory pool. The swiotlb memory pool
|
||||
* may be remapped in the memory encrypted case and store virtual
|
||||
* address for bounce buffer operation.
|
||||
* @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
|
||||
* @end. For default swiotlb, this is command line adjustable via
|
||||
* setup_io_tlb_npages.
|
||||
* @list: The free list describing the number of free entries available
|
||||
* from each index.
|
||||
* @orig_addr: The original address corresponding to a mapped entry.
|
||||
* @alloc_size: Size of the allocated buffer.
|
||||
* @nslabs: The number of IO TLB slots between @start and @end. For the
|
||||
* default swiotlb, this can be adjusted with a boot parameter,
|
||||
* see setup_io_tlb_npages().
|
||||
* @late_alloc: %true if allocated using the page allocator.
|
||||
* @nareas: Number of areas in the pool.
|
||||
* @area_nslabs: Number of slots in each area.
|
||||
* @areas: Array of memory area descriptors.
|
||||
* @slots: Array of slot descriptors.
|
||||
* @node: Member of the IO TLB memory pool list.
|
||||
* @rcu: RCU head for swiotlb_dyn_free().
|
||||
* @transient: %true if transient memory pool.
|
||||
*/
|
||||
struct io_tlb_pool {
|
||||
phys_addr_t start;
|
||||
phys_addr_t end;
|
||||
void *vaddr;
|
||||
unsigned long nslabs;
|
||||
bool late_alloc;
|
||||
unsigned int nareas;
|
||||
unsigned int area_nslabs;
|
||||
struct io_tlb_area *areas;
|
||||
struct io_tlb_slot *slots;
|
||||
#ifdef CONFIG_SWIOTLB_DYNAMIC
|
||||
struct list_head node;
|
||||
struct rcu_head rcu;
|
||||
bool transient;
|
||||
#endif
|
||||
};
|
||||
|
||||
/**
|
||||
* struct io_tlb_mem - Software IO TLB allocator
|
||||
* @defpool: Default (initial) IO TLB memory pool descriptor.
|
||||
* @pool: IO TLB memory pool descriptor (if not dynamic).
|
||||
* @nslabs: Total number of IO TLB slabs in all pools.
|
||||
* @debugfs: The dentry to debugfs.
|
||||
* @late_alloc: %true if allocated using the page allocator
|
||||
* @force_bounce: %true if swiotlb bouncing is forced
|
||||
* @for_alloc: %true if the pool is used for memory allocation
|
||||
* @nareas: The area number in the pool.
|
||||
* @area_nslabs: The slot number in the area.
|
||||
* @can_grow: %true if more pools can be allocated dynamically.
|
||||
* @phys_limit: Maximum allowed physical address.
|
||||
* @lock: Lock to synchronize changes to the list.
|
||||
* @pools: List of IO TLB memory pool descriptors (if dynamic).
|
||||
* @dyn_alloc: Dynamic IO TLB pool allocation work.
|
||||
* @total_used: The total number of slots in the pool that are currently used
|
||||
* across all areas. Used only for calculating used_hiwater in
|
||||
* debugfs.
|
||||
@ -93,30 +122,64 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
|
||||
* in debugfs.
|
||||
*/
|
||||
struct io_tlb_mem {
|
||||
phys_addr_t start;
|
||||
phys_addr_t end;
|
||||
void *vaddr;
|
||||
struct io_tlb_pool defpool;
|
||||
unsigned long nslabs;
|
||||
struct dentry *debugfs;
|
||||
bool late_alloc;
|
||||
bool force_bounce;
|
||||
bool for_alloc;
|
||||
unsigned int nareas;
|
||||
unsigned int area_nslabs;
|
||||
struct io_tlb_area *areas;
|
||||
struct io_tlb_slot *slots;
|
||||
#ifdef CONFIG_SWIOTLB_DYNAMIC
|
||||
bool can_grow;
|
||||
u64 phys_limit;
|
||||
spinlock_t lock;
|
||||
struct list_head pools;
|
||||
struct work_struct dyn_alloc;
|
||||
#endif
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
atomic_long_t total_used;
|
||||
atomic_long_t used_hiwater;
|
||||
#endif
|
||||
};
|
||||
extern struct io_tlb_mem io_tlb_default_mem;
|
||||
|
||||
#ifdef CONFIG_SWIOTLB_DYNAMIC
|
||||
|
||||
struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr);
|
||||
|
||||
#else
|
||||
|
||||
static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
|
||||
phys_addr_t paddr)
|
||||
{
|
||||
return &dev->dma_io_tlb_mem->defpool;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/**
|
||||
* is_swiotlb_buffer() - check if a physical address belongs to a swiotlb
|
||||
* @dev: Device which has mapped the buffer.
|
||||
* @paddr: Physical address within the DMA buffer.
|
||||
*
|
||||
* Check if @paddr points into a bounce buffer.
|
||||
*
|
||||
* Return:
|
||||
* * %true if @paddr points into a bounce buffer
|
||||
* * %false otherwise
|
||||
*/
|
||||
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
|
||||
{
|
||||
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
|
||||
|
||||
return mem && paddr >= mem->start && paddr < mem->end;
|
||||
if (!mem)
|
||||
return false;
|
||||
|
||||
if (IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC)) {
|
||||
/* Pairs with smp_wmb() in swiotlb_find_slots() and
|
||||
* swiotlb_dyn_alloc(), which modify the RCU lists.
|
||||
*/
|
||||
smp_rmb();
|
||||
return swiotlb_find_pool(dev, paddr);
|
||||
}
|
||||
return paddr >= mem->defpool.start && paddr < mem->defpool.end;
|
||||
}
|
||||
|
||||
static inline bool is_swiotlb_force_bounce(struct device *dev)
|
||||
@ -128,13 +191,22 @@ static inline bool is_swiotlb_force_bounce(struct device *dev)
|
||||
|
||||
void swiotlb_init(bool addressing_limited, unsigned int flags);
|
||||
void __init swiotlb_exit(void);
|
||||
void swiotlb_dev_init(struct device *dev);
|
||||
size_t swiotlb_max_mapping_size(struct device *dev);
|
||||
bool is_swiotlb_allocated(void);
|
||||
bool is_swiotlb_active(struct device *dev);
|
||||
void __init swiotlb_adjust_size(unsigned long size);
|
||||
phys_addr_t default_swiotlb_base(void);
|
||||
phys_addr_t default_swiotlb_limit(void);
|
||||
#else
|
||||
static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void swiotlb_dev_init(struct device *dev)
|
||||
{
|
||||
}
|
||||
|
||||
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
|
||||
{
|
||||
return false;
|
||||
@ -151,6 +223,11 @@ static inline size_t swiotlb_max_mapping_size(struct device *dev)
|
||||
return SIZE_MAX;
|
||||
}
|
||||
|
||||
static inline bool is_swiotlb_allocated(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline bool is_swiotlb_active(struct device *dev)
|
||||
{
|
||||
return false;
|
||||
@ -159,6 +236,16 @@ static inline bool is_swiotlb_active(struct device *dev)
|
||||
static inline void swiotlb_adjust_size(unsigned long size)
|
||||
{
|
||||
}
|
||||
|
||||
static inline phys_addr_t default_swiotlb_base(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline phys_addr_t default_swiotlb_limit(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_SWIOTLB */
|
||||
|
||||
extern void swiotlb_print_info(void);
|
||||
|
@ -29,6 +29,12 @@ extern bool xen_pvh;
|
||||
|
||||
extern uint32_t xen_start_flags;
|
||||
|
||||
#ifdef CONFIG_XEN_PV
|
||||
extern bool xen_pv_pci_possible;
|
||||
#else
|
||||
#define xen_pv_pci_possible 0
|
||||
#endif
|
||||
|
||||
#include <xen/interface/hvm/start_info.h>
|
||||
extern struct hvm_start_info pvh_start_info;
|
||||
void xen_prepare_pvh(void);
|
||||
|
@@ -90,6 +90,19 @@ config SWIOTLB
 	bool
 	select NEED_DMA_MAP_STATE

+config SWIOTLB_DYNAMIC
+	bool "Dynamic allocation of DMA bounce buffers"
+	default n
+	depends on SWIOTLB
+	help
+	  This enables dynamic resizing of the software IO TLB. The kernel
+	  starts with one memory pool at boot and it will allocate additional
+	  pools as needed. To reduce run-time kernel memory requirements, you
+	  may have to specify a smaller size of the initial pool using
+	  "swiotlb=" on the kernel command line.
+
+	  If unsure, say N.
+
 config DMA_BOUNCE_UNALIGNED_KMALLOC
 	bool
 	depends on SWIOTLB
@@ -145,15 +158,16 @@ config DMA_CMA

 if DMA_CMA

-config DMA_PERNUMA_CMA
-	bool "Enable separate DMA Contiguous Memory Area for each NUMA Node"
-	default NUMA && ARM64
+config DMA_NUMA_CMA
+	bool "Enable separate DMA Contiguous Memory Area for NUMA Node"
+	default NUMA
 	help
-	  Enable this option to get pernuma CMA areas so that devices like
-	  ARM64 SMMU can get local memory by DMA coherent APIs.
+	  Enable this option to get numa CMA areas so that NUMA devices
+	  can get local memory by DMA coherent APIs.

 	  You can set the size of pernuma CMA by specifying "cma_pernuma=size"
-	  on the kernel's command line.
+	  or set the node id and its size of CMA by specifying "numa_cma=
+	  <node>:size[,<node>:size]" on the kernel's command line.

 comment "Default contiguous memory area size:"

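Illustration of the help text above (a hypothetical configuration, not part
of the patch): a kernel built with

	CONFIG_SWIOTLB=y
	CONFIG_SWIOTLB_DYNAMIC=y

can be booted with a deliberately small initial bounce-buffer pool, for
example "swiotlb=16384" (the value is a number of I/O TLB slabs, each 2 KiB,
so roughly 32 MiB here); additional pools are then allocated at run time as
demand grows, via the swiotlb_dyn_alloc() worker added in this series.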
@ -50,6 +50,7 @@
|
||||
#include <linux/sizes.h>
|
||||
#include <linux/dma-map-ops.h>
|
||||
#include <linux/cma.h>
|
||||
#include <linux/nospec.h>
|
||||
|
||||
#ifdef CONFIG_CMA_SIZE_MBYTES
|
||||
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
|
||||
@ -96,11 +97,44 @@ static int __init early_cma(char *p)
|
||||
}
|
||||
early_param("cma", early_cma);
|
||||
|
||||
#ifdef CONFIG_DMA_PERNUMA_CMA
|
||||
#ifdef CONFIG_DMA_NUMA_CMA
|
||||
|
||||
static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
|
||||
static phys_addr_t numa_cma_size[MAX_NUMNODES] __initdata;
|
||||
static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
|
||||
static phys_addr_t pernuma_size_bytes __initdata;
|
||||
|
||||
static int __init early_numa_cma(char *p)
|
||||
{
|
||||
int nid, count = 0;
|
||||
unsigned long tmp;
|
||||
char *s = p;
|
||||
|
||||
while (*s) {
|
||||
if (sscanf(s, "%lu%n", &tmp, &count) != 1)
|
||||
break;
|
||||
|
||||
if (s[count] == ':') {
|
||||
if (tmp >= MAX_NUMNODES)
|
||||
break;
|
||||
nid = array_index_nospec(tmp, MAX_NUMNODES);
|
||||
|
||||
s += count + 1;
|
||||
tmp = memparse(s, &s);
|
||||
numa_cma_size[nid] = tmp;
|
||||
|
||||
if (*s == ',')
|
||||
s++;
|
||||
else
|
||||
break;
|
||||
} else
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
early_param("numa_cma", early_numa_cma);
|
||||
|
||||
static int __init early_cma_pernuma(char *p)
|
||||
{
|
||||
pernuma_size_bytes = memparse(p, &p);
|
||||
@ -127,32 +161,49 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_DMA_PERNUMA_CMA
|
||||
void __init dma_pernuma_cma_reserve(void)
|
||||
#ifdef CONFIG_DMA_NUMA_CMA
|
||||
static void __init dma_numa_cma_reserve(void)
|
||||
{
|
||||
int nid;
|
||||
|
||||
if (!pernuma_size_bytes)
|
||||
return;
|
||||
|
||||
for_each_online_node(nid) {
|
||||
for_each_node(nid) {
|
||||
int ret;
|
||||
char name[CMA_MAX_NAME];
|
||||
struct cma **cma = &dma_contiguous_pernuma_area[nid];
|
||||
struct cma **cma;
|
||||
|
||||
snprintf(name, sizeof(name), "pernuma%d", nid);
|
||||
ret = cma_declare_contiguous_nid(0, pernuma_size_bytes, 0, 0,
|
||||
0, false, name, cma, nid);
|
||||
if (ret) {
|
||||
pr_warn("%s: reservation failed: err %d, node %d", __func__,
|
||||
ret, nid);
|
||||
if (!node_online(nid)) {
|
||||
if (pernuma_size_bytes || numa_cma_size[nid])
|
||||
pr_warn("invalid node %d specified\n", nid);
|
||||
continue;
|
||||
}
|
||||
|
||||
pr_debug("%s: reserved %llu MiB on node %d\n", __func__,
|
||||
(unsigned long long)pernuma_size_bytes / SZ_1M, nid);
|
||||
if (pernuma_size_bytes) {
|
||||
|
||||
cma = &dma_contiguous_pernuma_area[nid];
|
||||
snprintf(name, sizeof(name), "pernuma%d", nid);
|
||||
ret = cma_declare_contiguous_nid(0, pernuma_size_bytes, 0, 0,
|
||||
0, false, name, cma, nid);
|
||||
if (ret)
|
||||
pr_warn("%s: reservation failed: err %d, node %d", __func__,
|
||||
ret, nid);
|
||||
}
|
||||
|
||||
if (numa_cma_size[nid]) {
|
||||
|
||||
cma = &dma_contiguous_numa_area[nid];
|
||||
snprintf(name, sizeof(name), "numa%d", nid);
|
||||
ret = cma_declare_contiguous_nid(0, numa_cma_size[nid], 0, 0, 0, false,
|
||||
name, cma, nid);
|
||||
if (ret)
|
||||
pr_warn("%s: reservation failed: err %d, node %d", __func__,
|
||||
ret, nid);
|
||||
}
|
||||
}
|
||||
}
|
||||
#else
|
||||
static inline void __init dma_numa_cma_reserve(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
@ -171,6 +222,8 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
|
||||
phys_addr_t selected_limit = limit;
|
||||
bool fixed = false;
|
||||
|
||||
dma_numa_cma_reserve();
|
||||
|
||||
pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
|
||||
|
||||
if (size_cmdline != -1) {
|
||||
@ -303,7 +356,7 @@ static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
|
||||
*/
|
||||
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
|
||||
{
|
||||
#ifdef CONFIG_DMA_PERNUMA_CMA
|
||||
#ifdef CONFIG_DMA_NUMA_CMA
|
||||
int nid = dev_to_node(dev);
|
||||
#endif
|
||||
|
||||
@ -315,7 +368,7 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
|
||||
if (size <= PAGE_SIZE)
|
||||
return NULL;
|
||||
|
||||
#ifdef CONFIG_DMA_PERNUMA_CMA
|
||||
#ifdef CONFIG_DMA_NUMA_CMA
|
||||
if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {
|
||||
struct cma *cma = dma_contiguous_pernuma_area[nid];
|
||||
struct page *page;
|
||||
@ -325,6 +378,13 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
|
||||
if (page)
|
||||
return page;
|
||||
}
|
||||
|
||||
cma = dma_contiguous_numa_area[nid];
|
||||
if (cma) {
|
||||
page = cma_alloc_aligned(cma, size, gfp);
|
||||
if (page)
|
||||
return page;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
if (!dma_contiguous_default_area)
|
||||
@ -356,10 +416,13 @@ void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
|
||||
/*
|
||||
* otherwise, page is from either per-numa cma or default cma
|
||||
*/
|
||||
#ifdef CONFIG_DMA_PERNUMA_CMA
|
||||
#ifdef CONFIG_DMA_NUMA_CMA
|
||||
if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)],
|
||||
page, count))
|
||||
return;
|
||||
if (cma_release(dma_contiguous_numa_area[page_to_nid(page)],
|
||||
page, count))
|
||||
return;
|
||||
#endif
|
||||
if (cma_release(dma_contiguous_default_area, page, count))
|
||||
return;
|
||||
@ -410,6 +473,11 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (memblock_is_region_reserved(rmem->base, rmem->size)) {
|
||||
pr_info("Reserved memory: overlap with other memblock reserved region\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
|
||||
of_get_flat_dt_prop(node, "no-map", NULL))
|
||||
return -EINVAL;
|
||||
|
@ -66,7 +66,7 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
|
||||
bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
|
||||
{
|
||||
dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);
|
||||
|
||||
|
@ -760,12 +760,6 @@ bool dma_pci_p2pdma_supported(struct device *dev)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
|
||||
void arch_dma_set_mask(struct device *dev, u64 mask);
|
||||
#else
|
||||
#define arch_dma_set_mask(dev, mask) do { } while (0)
|
||||
#endif
|
||||
|
||||
int dma_set_mask(struct device *dev, u64 mask)
|
||||
{
|
||||
/*
|
||||
|
@ -35,6 +35,7 @@
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/pfn.h>
|
||||
#include <linux/rculist.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/set_memory.h>
|
||||
#include <linux/spinlock.h>
|
||||
@ -62,6 +63,13 @@
|
||||
|
||||
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
|
||||
|
||||
/**
|
||||
* struct io_tlb_slot - IO TLB slot descriptor
|
||||
* @orig_addr: The original address corresponding to a mapped entry.
|
||||
* @alloc_size: Size of the allocated buffer.
|
||||
* @list: The free list describing the number of free entries available
|
||||
* from each index.
|
||||
*/
|
||||
struct io_tlb_slot {
|
||||
phys_addr_t orig_addr;
|
||||
size_t alloc_size;
|
||||
@ -71,7 +79,22 @@ struct io_tlb_slot {
|
||||
static bool swiotlb_force_bounce;
|
||||
static bool swiotlb_force_disable;
|
||||
|
||||
struct io_tlb_mem io_tlb_default_mem;
|
||||
#ifdef CONFIG_SWIOTLB_DYNAMIC
|
||||
|
||||
static void swiotlb_dyn_alloc(struct work_struct *work);
|
||||
|
||||
static struct io_tlb_mem io_tlb_default_mem = {
|
||||
.lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
|
||||
.pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
|
||||
.dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
|
||||
swiotlb_dyn_alloc),
|
||||
};
|
||||
|
||||
#else /* !CONFIG_SWIOTLB_DYNAMIC */
|
||||
|
||||
static struct io_tlb_mem io_tlb_default_mem;
|
||||
|
||||
#endif /* CONFIG_SWIOTLB_DYNAMIC */
|
||||
|
||||
static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
|
||||
static unsigned long default_nareas;
|
||||
@ -202,7 +225,7 @@ void __init swiotlb_adjust_size(unsigned long size)
|
||||
|
||||
void swiotlb_print_info(void)
|
||||
{
|
||||
struct io_tlb_mem *mem = &io_tlb_default_mem;
|
||||
struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
|
||||
|
||||
if (!mem->nslabs) {
|
||||
pr_warn("No low mem\n");
|
||||
@ -231,7 +254,7 @@ static inline unsigned long nr_slots(u64 val)
|
||||
*/
|
||||
void __init swiotlb_update_mem_attributes(void)
|
||||
{
|
||||
struct io_tlb_mem *mem = &io_tlb_default_mem;
|
||||
struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
|
||||
unsigned long bytes;
|
||||
|
||||
if (!mem->nslabs || mem->late_alloc)
|
||||
@ -240,9 +263,8 @@ void __init swiotlb_update_mem_attributes(void)
|
||||
set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
|
||||
unsigned long nslabs, unsigned int flags,
|
||||
bool late_alloc, unsigned int nareas)
|
||||
static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
|
||||
unsigned long nslabs, bool late_alloc, unsigned int nareas)
|
||||
{
|
||||
void *vaddr = phys_to_virt(start);
|
||||
unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
|
||||
@ -254,8 +276,6 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
|
||||
mem->nareas = nareas;
|
||||
mem->area_nslabs = nslabs / mem->nareas;
|
||||
|
||||
mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
|
||||
|
||||
for (i = 0; i < mem->nareas; i++) {
|
||||
spin_lock_init(&mem->areas[i].lock);
|
||||
mem->areas[i].index = 0;
|
||||
@ -273,6 +293,23 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* add_mem_pool() - add a memory pool to the allocator
|
||||
* @mem: Software IO TLB allocator.
|
||||
* @pool: Memory pool to be added.
|
||||
*/
|
||||
static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
|
||||
{
|
||||
#ifdef CONFIG_SWIOTLB_DYNAMIC
|
||||
spin_lock(&mem->lock);
|
||||
list_add_rcu(&pool->node, &mem->pools);
|
||||
mem->nslabs += pool->nslabs;
|
||||
spin_unlock(&mem->lock);
|
||||
#else
|
||||
mem->nslabs = pool->nslabs;
|
||||
#endif
|
||||
}
|
||||
|
||||
static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
|
||||
unsigned int flags,
|
||||
int (*remap)(void *tlb, unsigned long nslabs))
|
||||
@ -312,7 +349,7 @@ static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
|
||||
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
|
||||
int (*remap)(void *tlb, unsigned long nslabs))
|
||||
{
|
||||
struct io_tlb_mem *mem = &io_tlb_default_mem;
|
||||
struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
|
||||
unsigned long nslabs;
|
||||
unsigned int nareas;
|
||||
size_t alloc_size;
|
||||
@ -323,6 +360,18 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
|
||||
if (swiotlb_force_disable)
|
||||
return;
|
||||
|
||||
io_tlb_default_mem.force_bounce =
|
||||
swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
|
||||
|
||||
#ifdef CONFIG_SWIOTLB_DYNAMIC
|
||||
if (!remap)
|
||||
io_tlb_default_mem.can_grow = true;
|
||||
if (flags & SWIOTLB_ANY)
|
||||
io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
|
||||
else
|
||||
io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT;
|
||||
#endif
|
||||
|
||||
if (!default_nareas)
|
||||
swiotlb_adjust_nareas(num_possible_cpus());
|
||||
|
||||
@ -356,8 +405,9 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
|
||||
return;
|
||||
}
|
||||
|
||||
swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
|
||||
default_nareas);
|
||||
swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false,
|
||||
default_nareas);
|
||||
add_mem_pool(&io_tlb_default_mem, mem);
|
||||
|
||||
if (flags & SWIOTLB_VERBOSE)
|
||||
swiotlb_print_info();
|
||||
@ -376,7 +426,7 @@ void __init swiotlb_init(bool addressing_limit, unsigned int flags)
|
||||
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
|
||||
int (*remap)(void *tlb, unsigned long nslabs))
|
||||
{
|
||||
struct io_tlb_mem *mem = &io_tlb_default_mem;
|
||||
struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
|
||||
unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
|
||||
unsigned int nareas;
|
||||
unsigned char *vstart = NULL;
|
||||
@ -384,9 +434,25 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
|
||||
bool retried = false;
|
||||
int rc = 0;
|
||||
|
||||
if (io_tlb_default_mem.nslabs)
|
||||
return 0;
|
||||
|
||||
if (swiotlb_force_disable)
|
||||
return 0;
|
||||
|
||||
io_tlb_default_mem.force_bounce = swiotlb_force_bounce;
|
||||
|
||||
#ifdef CONFIG_SWIOTLB_DYNAMIC
|
||||
if (!remap)
|
||||
io_tlb_default_mem.can_grow = true;
|
||||
if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
|
||||
io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
|
||||
else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
|
||||
io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
|
||||
else
|
||||
io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
|
||||
#endif
|
||||
|
||||
if (!default_nareas)
|
||||
swiotlb_adjust_nareas(num_possible_cpus());
|
||||
|
||||
@ -438,8 +504,9 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
|
||||
|
||||
set_memory_decrypted((unsigned long)vstart,
|
||||
(nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
|
||||
swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
|
||||
nareas);
|
||||
swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true,
|
||||
nareas);
|
||||
add_mem_pool(&io_tlb_default_mem, mem);
|
||||
|
||||
swiotlb_print_info();
|
||||
return 0;
|
||||
@ -453,7 +520,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
|
||||
|
||||
void __init swiotlb_exit(void)
|
||||
{
|
||||
struct io_tlb_mem *mem = &io_tlb_default_mem;
|
||||
struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
|
||||
unsigned long tbl_vaddr;
|
||||
size_t tbl_size, slots_size;
|
||||
unsigned int area_order;
|
||||
@ -486,6 +553,265 @@ void __init swiotlb_exit(void)
|
||||
memset(mem, 0, sizeof(*mem));
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SWIOTLB_DYNAMIC
|
||||
|
||||
/**
|
||||
* alloc_dma_pages() - allocate pages to be used for DMA
|
||||
* @gfp: GFP flags for the allocation.
|
||||
* @bytes: Size of the buffer.
|
||||
*
|
||||
* Allocate pages from the buddy allocator. If successful, make the allocated
|
||||
* pages decrypted that they can be used for DMA.
|
||||
*
|
||||
* Return: Decrypted pages, or %NULL on failure.
|
||||
*/
|
||||
static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes)
|
||||
{
|
||||
unsigned int order = get_order(bytes);
|
||||
struct page *page;
|
||||
void *vaddr;
|
||||
|
||||
page = alloc_pages(gfp, order);
|
||||
if (!page)
|
||||
return NULL;
|
||||
|
||||
vaddr = page_address(page);
|
||||
if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
|
||||
goto error;
|
||||
return page;
|
||||
|
||||
error:
|
||||
__free_pages(page, order);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer
|
||||
* @dev: Device for which a memory pool is allocated.
|
||||
* @bytes: Size of the buffer.
|
||||
* @phys_limit: Maximum allowed physical address of the buffer.
|
||||
* @gfp: GFP flags for the allocation.
|
||||
*
|
||||
* Return: Allocated pages, or %NULL on allocation failure.
|
||||
*/
|
||||
static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
|
||||
u64 phys_limit, gfp_t gfp)
|
||||
{
|
||||
struct page *page;
|
||||
|
||||
/*
|
||||
* Allocate from the atomic pools if memory is encrypted and
|
||||
* the allocation is atomic, because decrypting may block.
|
||||
*/
|
||||
if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) {
|
||||
void *vaddr;
|
||||
|
||||
if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
|
||||
return NULL;
|
||||
|
||||
return dma_alloc_from_pool(dev, bytes, &vaddr, gfp,
|
||||
dma_coherent_ok);
|
||||
}
|
||||
|
||||
gfp &= ~GFP_ZONEMASK;
|
||||
if (phys_limit <= DMA_BIT_MASK(zone_dma_bits))
|
||||
gfp |= __GFP_DMA;
|
||||
else if (phys_limit <= DMA_BIT_MASK(32))
|
||||
gfp |= __GFP_DMA32;
|
||||
|
||||
while ((page = alloc_dma_pages(gfp, bytes)) &&
|
||||
page_to_phys(page) + bytes - 1 > phys_limit) {
|
||||
/* allocated, but too high */
|
||||
__free_pages(page, get_order(bytes));
|
||||
|
||||
if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
|
||||
phys_limit < DMA_BIT_MASK(64) &&
|
||||
!(gfp & (__GFP_DMA32 | __GFP_DMA)))
|
||||
gfp |= __GFP_DMA32;
|
||||
else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
|
||||
!(gfp & __GFP_DMA))
|
||||
gfp = (gfp & ~__GFP_DMA32) | __GFP_DMA;
|
||||
else
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return page;
|
||||
}
|
||||
|
||||
/**
|
||||
* swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer
|
||||
* @vaddr: Virtual address of the buffer.
|
||||
* @bytes: Size of the buffer.
|
||||
*/
|
||||
static void swiotlb_free_tlb(void *vaddr, size_t bytes)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
|
||||
dma_free_from_pool(NULL, vaddr, bytes))
|
||||
return;
|
||||
|
||||
/* Intentional leak if pages cannot be encrypted again. */
|
||||
if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
|
||||
__free_pages(virt_to_page(vaddr), get_order(bytes));
|
||||
}
|
||||
|
||||
/**
|
||||
* swiotlb_alloc_pool() - allocate a new IO TLB memory pool
|
||||
* @dev: Device for which a memory pool is allocated.
|
||||
* @minslabs: Minimum number of slabs.
|
||||
* @nslabs: Desired (maximum) number of slabs.
|
||||
* @nareas: Number of areas.
|
||||
* @phys_limit: Maximum DMA buffer physical address.
|
||||
* @gfp: GFP flags for the allocations.
|
||||
*
|
||||
* Allocate and initialize a new IO TLB memory pool. The actual number of
|
||||
* slabs may be reduced if allocation of @nslabs fails. If even
|
||||
* @minslabs cannot be allocated, this function fails.
|
||||
*
|
||||
* Return: New memory pool, or %NULL on allocation failure.
|
||||
*/
|
||||
static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
|
||||
unsigned long minslabs, unsigned long nslabs,
|
||||
unsigned int nareas, u64 phys_limit, gfp_t gfp)
|
||||
{
|
||||
struct io_tlb_pool *pool;
|
||||
unsigned int slot_order;
|
||||
struct page *tlb;
|
||||
size_t pool_size;
|
||||
size_t tlb_size;
|
||||
|
||||
pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
|
||||
pool = kzalloc(pool_size, gfp);
|
||||
if (!pool)
|
||||
goto error;
|
||||
pool->areas = (void *)pool + sizeof(*pool);
|
||||
|
||||
tlb_size = nslabs << IO_TLB_SHIFT;
|
||||
while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
|
||||
if (nslabs <= minslabs)
|
||||
goto error_tlb;
|
||||
nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
|
||||
nareas = limit_nareas(nareas, nslabs);
|
||||
tlb_size = nslabs << IO_TLB_SHIFT;
|
||||
}
|
||||
|
||||
slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
|
||||
pool->slots = (struct io_tlb_slot *)
|
||||
__get_free_pages(gfp, slot_order);
|
||||
if (!pool->slots)
|
||||
goto error_slots;
|
||||
|
||||
swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
|
||||
return pool;
|
||||
|
||||
error_slots:
|
||||
swiotlb_free_tlb(page_address(tlb), tlb_size);
|
||||
error_tlb:
|
||||
kfree(pool);
|
||||
error:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* swiotlb_dyn_alloc() - dynamic memory pool allocation worker
|
||||
* @work: Pointer to dyn_alloc in struct io_tlb_mem.
|
||||
*/
|
||||
static void swiotlb_dyn_alloc(struct work_struct *work)
|
||||
{
|
||||
struct io_tlb_mem *mem =
|
||||
container_of(work, struct io_tlb_mem, dyn_alloc);
|
||||
struct io_tlb_pool *pool;
|
||||
|
||||
pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
|
||||
default_nareas, mem->phys_limit, GFP_KERNEL);
|
||||
if (!pool) {
|
||||
pr_warn_ratelimited("Failed to allocate new pool");
|
||||
return;
|
||||
}
|
||||
|
||||
add_mem_pool(mem, pool);
|
||||
|
||||
/* Pairs with smp_rmb() in is_swiotlb_buffer(). */
|
||||
smp_wmb();
|
||||
}
|
||||
|
||||
/**
|
||||
* swiotlb_dyn_free() - RCU callback to free a memory pool
|
||||
* @rcu: RCU head in the corresponding struct io_tlb_pool.
|
||||
*/
|
||||
static void swiotlb_dyn_free(struct rcu_head *rcu)
|
||||
{
|
||||
struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
|
||||
size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
|
||||
size_t tlb_size = pool->end - pool->start;
|
||||
|
||||
free_pages((unsigned long)pool->slots, get_order(slots_size));
|
||||
swiotlb_free_tlb(pool->vaddr, tlb_size);
|
||||
kfree(pool);
|
||||
}
|
||||
|
||||
/**
|
||||
* swiotlb_find_pool() - find the IO TLB pool for a physical address
|
||||
* @dev: Device which has mapped the DMA buffer.
|
||||
* @paddr: Physical address within the DMA buffer.
|
||||
*
|
||||
* Find the IO TLB memory pool descriptor which contains the given physical
|
||||
* address, if any.
|
||||
*
|
||||
* Return: Memory pool which contains @paddr, or %NULL if none.
|
||||
*/
|
||||
struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
|
||||
{
|
||||
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
|
||||
struct io_tlb_pool *pool;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(pool, &mem->pools, node) {
|
||||
if (paddr >= pool->start && paddr < pool->end)
|
||||
goto out;
|
||||
}
|
||||
|
||||
list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
|
||||
if (paddr >= pool->start && paddr < pool->end)
|
||||
goto out;
|
||||
}
|
||||
pool = NULL;
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
return pool;
|
||||
}
|
||||
|
||||
/**
|
||||
* swiotlb_del_pool() - remove an IO TLB pool from a device
|
||||
* @dev: Owning device.
|
||||
* @pool: Memory pool to be removed.
|
||||
*/
|
||||
static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
|
||||
list_del_rcu(&pool->node);
|
||||
spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
|
||||
|
||||
call_rcu(&pool->rcu, swiotlb_dyn_free);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SWIOTLB_DYNAMIC */
|
||||
|
||||
/**
|
||||
* swiotlb_dev_init() - initialize swiotlb fields in &struct device
|
||||
* @dev: Device to be initialized.
|
||||
*/
|
||||
void swiotlb_dev_init(struct device *dev)
|
||||
{
|
||||
dev->dma_io_tlb_mem = &io_tlb_default_mem;
|
||||
#ifdef CONFIG_SWIOTLB_DYNAMIC
|
||||
INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
|
||||
spin_lock_init(&dev->dma_io_tlb_lock);
|
||||
dev->dma_uses_io_tlb = false;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the offset into a iotlb slot required to keep the device happy.
|
||||
*/
|
||||
@ -500,7 +826,7 @@ static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
|
||||
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
|
||||
struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
|
||||
int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
|
||||
phys_addr_t orig_addr = mem->slots[index].orig_addr;
|
||||
size_t alloc_size = mem->slots[index].alloc_size;
|
||||
@ -577,12 +903,10 @@ static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
|
||||
*/
|
||||
static inline unsigned long get_max_slots(unsigned long boundary_mask)
|
||||
{
|
||||
if (boundary_mask == ~0UL)
|
||||
return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
|
||||
return nr_slots(boundary_mask + 1);
|
||||
return (boundary_mask >> IO_TLB_SHIFT) + 1;
|
||||
}
|
||||
|
||||
static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index)
|
||||
static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index)
|
||||
{
|
||||
if (index >= mem->area_nslabs)
|
||||
return 0;
|
||||
@ -623,19 +947,30 @@ static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
|
||||
}
|
||||
#endif /* CONFIG_DEBUG_FS */
|
||||
|
||||
/*
|
||||
* Find a suitable number of IO TLB entries size that will fit this request and
|
||||
* allocate a buffer from that IO TLB pool.
|
||||
/**
|
||||
* swiotlb_area_find_slots() - search for slots in one IO TLB memory area
|
||||
* @dev: Device which maps the buffer.
|
||||
* @pool: Memory pool to be searched.
|
||||
* @area_index: Index of the IO TLB memory area to be searched.
|
||||
* @orig_addr: Original (non-bounced) IO buffer address.
|
||||
* @alloc_size: Total requested size of the bounce buffer,
|
||||
* including initial alignment padding.
|
||||
* @alloc_align_mask: Required alignment of the allocated buffer.
|
||||
*
|
||||
* Find a suitable sequence of IO TLB entries for the request and allocate
|
||||
* a buffer from the given IO TLB memory area.
|
||||
* This function takes care of locking.
|
||||
*
|
||||
* Return: Index of the first allocated slot, or -1 on error.
|
||||
*/
|
||||
static int swiotlb_do_find_slots(struct device *dev, int area_index,
|
||||
phys_addr_t orig_addr, size_t alloc_size,
|
||||
static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
|
||||
int area_index, phys_addr_t orig_addr, size_t alloc_size,
|
||||
unsigned int alloc_align_mask)
|
||||
{
|
||||
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
|
||||
struct io_tlb_area *area = mem->areas + area_index;
|
||||
struct io_tlb_area *area = pool->areas + area_index;
|
||||
unsigned long boundary_mask = dma_get_seg_boundary(dev);
|
||||
dma_addr_t tbl_dma_addr =
|
||||
phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
|
||||
phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
|
||||
unsigned long max_slots = get_max_slots(boundary_mask);
|
||||
unsigned int iotlb_align_mask =
|
||||
dma_get_min_align_mask(dev) | alloc_align_mask;
|
||||
@ -647,7 +982,7 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
|
||||
unsigned int slot_index;
|
||||
|
||||
BUG_ON(!nslots);
|
||||
BUG_ON(area_index >= mem->nareas);
|
||||
BUG_ON(area_index >= pool->nareas);
|
||||
|
||||
/*
|
||||
* For allocations of PAGE_SIZE or larger only look for page aligned
|
||||
@ -664,35 +999,30 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
|
||||
stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
|
||||
|
||||
spin_lock_irqsave(&area->lock, flags);
|
||||
if (unlikely(nslots > mem->area_nslabs - area->used))
|
||||
if (unlikely(nslots > pool->area_nslabs - area->used))
|
||||
goto not_found;
|
||||
|
||||
slot_base = area_index * mem->area_nslabs;
|
||||
slot_base = area_index * pool->area_nslabs;
|
||||
index = area->index;
|
||||
|
||||
for (slots_checked = 0; slots_checked < mem->area_nslabs; ) {
|
||||
for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
|
||||
slot_index = slot_base + index;
|
||||
|
||||
if (orig_addr &&
|
||||
(slot_addr(tbl_dma_addr, slot_index) &
|
||||
iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
|
||||
index = wrap_area_index(mem, index + 1);
|
||||
index = wrap_area_index(pool, index + 1);
|
||||
slots_checked++;
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we find a slot that indicates we have 'nslots' number of
|
||||
* contiguous buffers, we allocate the buffers from that slot
|
||||
* and mark the entries as '0' indicating unavailable.
|
||||
*/
|
||||
if (!iommu_is_span_boundary(slot_index, nslots,
|
||||
nr_slots(tbl_dma_addr),
|
||||
max_slots)) {
|
||||
if (mem->slots[slot_index].list >= nslots)
|
||||
if (pool->slots[slot_index].list >= nslots)
|
||||
goto found;
|
||||
}
|
||||
index = wrap_area_index(mem, index + stride);
|
||||
index = wrap_area_index(pool, index + stride);
|
||||
slots_checked += stride;
|
||||
}
|
||||
|
||||
@ -701,48 +1031,159 @@ static int swiotlb_do_find_slots(struct device *dev, int area_index,
|
||||
return -1;
|
||||
|
||||
found:
|
||||
/*
|
||||
* If we find a slot that indicates we have 'nslots' number of
|
||||
* contiguous buffers, we allocate the buffers from that slot onwards
|
||||
* and set the list of free entries to '0' indicating unavailable.
|
||||
*/
|
||||
for (i = slot_index; i < slot_index + nslots; i++) {
|
||||
mem->slots[i].list = 0;
|
||||
mem->slots[i].alloc_size = alloc_size - (offset +
|
||||
pool->slots[i].list = 0;
|
||||
pool->slots[i].alloc_size = alloc_size - (offset +
|
||||
((i - slot_index) << IO_TLB_SHIFT));
|
||||
}
|
||||
for (i = slot_index - 1;
|
||||
io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
|
||||
mem->slots[i].list; i--)
|
||||
mem->slots[i].list = ++count;
|
||||
pool->slots[i].list; i--)
|
||||
pool->slots[i].list = ++count;
|
||||
|
||||
/*
|
||||
* Update the indices to avoid searching in the next round.
|
||||
*/
|
||||
area->index = wrap_area_index(mem, index + nslots);
|
||||
area->index = wrap_area_index(pool, index + nslots);
|
||||
area->used += nslots;
|
||||
spin_unlock_irqrestore(&area->lock, flags);
|
||||
|
||||
inc_used_and_hiwater(mem, nslots);
|
||||
inc_used_and_hiwater(dev->dma_io_tlb_mem, nslots);
|
||||
return slot_index;
|
||||
}
|
||||
|
||||
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
|
||||
size_t alloc_size, unsigned int alloc_align_mask)
|
||||
/**
|
||||
* swiotlb_pool_find_slots() - search for slots in one memory pool
|
||||
* @dev: Device which maps the buffer.
|
||||
* @pool: Memory pool to be searched.
|
||||
* @orig_addr: Original (non-bounced) IO buffer address.
|
||||
* @alloc_size: Total requested size of the bounce buffer,
|
||||
* including initial alignment padding.
|
||||
* @alloc_align_mask: Required alignment of the allocated buffer.
|
||||
*
|
||||
* Search through one memory pool to find a sequence of slots that match the
|
||||
* allocation constraints.
|
||||
*
|
||||
* Return: Index of the first allocated slot, or -1 on error.
|
||||
*/
|
||||
static int swiotlb_pool_find_slots(struct device *dev, struct io_tlb_pool *pool,
|
||||
phys_addr_t orig_addr, size_t alloc_size,
|
||||
unsigned int alloc_align_mask)
|
||||
{
|
||||
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
|
||||
int start = raw_smp_processor_id() & (mem->nareas - 1);
|
||||
int start = raw_smp_processor_id() & (pool->nareas - 1);
|
||||
int i = start, index;
|
||||
|
||||
do {
|
||||
index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
|
||||
alloc_align_mask);
|
||||
index = swiotlb_area_find_slots(dev, pool, i, orig_addr,
|
||||
alloc_size, alloc_align_mask);
|
||||
if (index >= 0)
|
||||
return index;
|
||||
if (++i >= mem->nareas)
|
||||
if (++i >= pool->nareas)
|
||||
i = 0;
|
||||
} while (i != start);
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SWIOTLB_DYNAMIC
|
||||
|
||||
/**
|
||||
* swiotlb_find_slots() - search for slots in the whole swiotlb
|
||||
* @dev: Device which maps the buffer.
|
||||
* @orig_addr: Original (non-bounced) IO buffer address.
|
||||
* @alloc_size: Total requested size of the bounce buffer,
|
||||
* including initial alignment padding.
|
||||
* @alloc_align_mask: Required alignment of the allocated buffer.
|
||||
* @retpool: Used memory pool, updated on return.
|
||||
*
|
||||
* Search through the whole software IO TLB to find a sequence of slots that
|
||||
* match the allocation constraints.
|
||||
*
|
||||
* Return: Index of the first allocated slot, or -1 on error.
|
||||
*/
|
||||
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
|
||||
size_t alloc_size, unsigned int alloc_align_mask,
|
||||
struct io_tlb_pool **retpool)
|
||||
{
|
||||
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
|
||||
struct io_tlb_pool *pool;
|
||||
unsigned long nslabs;
|
||||
unsigned long flags;
|
||||
u64 phys_limit;
|
||||
int index;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(pool, &mem->pools, node) {
|
||||
index = swiotlb_pool_find_slots(dev, pool, orig_addr,
|
||||
alloc_size, alloc_align_mask);
|
||||
if (index >= 0) {
|
||||
rcu_read_unlock();
|
||||
goto found;
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
if (!mem->can_grow)
|
||||
return -1;
|
||||
|
||||
schedule_work(&mem->dyn_alloc);
|
||||
|
||||
nslabs = nr_slots(alloc_size);
|
||||
phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
|
||||
pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
|
||||
GFP_NOWAIT | __GFP_NOWARN);
|
||||
if (!pool)
|
||||
return -1;
|
||||
|
||||
index = swiotlb_pool_find_slots(dev, pool, orig_addr,
|
||||
alloc_size, alloc_align_mask);
|
||||
if (index < 0) {
|
||||
swiotlb_dyn_free(&pool->rcu);
|
||||
return -1;
|
||||
}
|
||||
|
||||
pool->transient = true;
|
||||
spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
|
||||
list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
|
||||
spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
|
||||
|
||||
found:
|
||||
dev->dma_uses_io_tlb = true;
|
||||
/* Pairs with smp_rmb() in is_swiotlb_buffer() */
|
||||
smp_wmb();
|
||||
|
||||
*retpool = pool;
|
||||
return index;
|
||||
}
|
||||
|
||||
#else /* !CONFIG_SWIOTLB_DYNAMIC */
|
||||
|
||||
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
|
||||
size_t alloc_size, unsigned int alloc_align_mask,
|
||||
struct io_tlb_pool **retpool)
|
||||
{
|
||||
*retpool = &dev->dma_io_tlb_mem->defpool;
|
||||
return swiotlb_pool_find_slots(dev, *retpool,
|
||||
orig_addr, alloc_size, alloc_align_mask);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SWIOTLB_DYNAMIC */
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
|
||||
/**
|
||||
* mem_used() - get number of used slots in an allocator
|
||||
* @mem: Software IO TLB allocator.
|
||||
*
|
||||
* The result is accurate in this version of the function, because an atomic
|
||||
* counter is available if CONFIG_DEBUG_FS is set.
|
||||
*
|
||||
* Return: Number of used slots.
|
||||
*/
|
||||
static unsigned long mem_used(struct io_tlb_mem *mem)
|
||||
{
|
||||
return atomic_long_read(&mem->total_used);
|
||||
@@ -750,16 +1191,50 @@ static unsigned long mem_used(struct io_tlb_mem *mem)

#else /* !CONFIG_DEBUG_FS */

static unsigned long mem_used(struct io_tlb_mem *mem)
/**
 * mem_pool_used() - get number of used slots in a memory pool
 * @pool: Software IO TLB memory pool.
 *
 * The result is not accurate, see mem_used().
 *
 * Return: Approximate number of used slots.
 */
static unsigned long mem_pool_used(struct io_tlb_pool *pool)
{
        int i;
        unsigned long used = 0;

        for (i = 0; i < mem->nareas; i++)
                used += mem->areas[i].used;
        for (i = 0; i < pool->nareas; i++)
                used += pool->areas[i].used;
        return used;
}

/**
 * mem_used() - get number of used slots in an allocator
 * @mem: Software IO TLB allocator.
 *
 * The result is not accurate, because there is no locking of individual
 * areas.
 *
 * Return: Approximate number of used slots.
 */
static unsigned long mem_used(struct io_tlb_mem *mem)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
        struct io_tlb_pool *pool;
        unsigned long used = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(pool, &mem->pools, node)
                used += mem_pool_used(pool);
        rcu_read_unlock();

        return used;
#else
        return mem_pool_used(&mem->defpool);
#endif
}

#endif /* CONFIG_DEBUG_FS */

phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
@@ -769,6 +1244,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        unsigned int offset = swiotlb_align_offset(dev, orig_addr);
        struct io_tlb_pool *pool;
        unsigned int i;
        int index;
        phys_addr_t tlb_addr;
@@ -789,7 +1265,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
        }

        index = swiotlb_find_slots(dev, orig_addr,
                        alloc_size + offset, alloc_align_mask);
                        alloc_size + offset, alloc_align_mask, &pool);
        if (index == -1) {
                if (!(attrs & DMA_ATTR_NO_WARN))
                        dev_warn_ratelimited(dev,
@@ -804,8 +1280,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
         * needed.
         */
        for (i = 0; i < nr_slots(alloc_size + offset); i++)
                mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
        tlb_addr = slot_addr(mem->start, index) + offset;
                pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
        tlb_addr = slot_addr(pool->start, index) + offset;
        /*
         * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
         * to the tlb buffer, if we knew for sure the device will
@@ -819,7 +1295,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,

static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
        unsigned long flags;
        unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
        int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
@@ -863,9 +1339,44 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
        area->used -= nslots;
        spin_unlock_irqrestore(&area->lock, flags);

        dec_used(mem, nslots);
        dec_used(dev->dma_io_tlb_mem, nslots);
}

#ifdef CONFIG_SWIOTLB_DYNAMIC

/**
 * swiotlb_del_transient() - delete a transient memory pool
 * @dev: Device which mapped the buffer.
 * @tlb_addr: Physical address within a bounce buffer.
 *
 * Check whether the address belongs to a transient SWIOTLB memory pool.
 * If yes, then delete the pool.
 *
 * Return: %true if @tlb_addr belonged to a transient pool that was released.
 */
static bool swiotlb_del_transient(struct device *dev, phys_addr_t tlb_addr)
{
        struct io_tlb_pool *pool;

        pool = swiotlb_find_pool(dev, tlb_addr);
        if (!pool->transient)
                return false;

        dec_used(dev->dma_io_tlb_mem, pool->nslabs);
        swiotlb_del_pool(dev, pool);
        return true;
}

#else /* !CONFIG_SWIOTLB_DYNAMIC */

static inline bool swiotlb_del_transient(struct device *dev,
                phys_addr_t tlb_addr)
{
        return false;
}

#endif /* CONFIG_SWIOTLB_DYNAMIC */

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
@@ -880,6 +1391,8 @@ void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
                (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

        if (swiotlb_del_transient(dev, tlb_addr))
                return;
        swiotlb_release_slots(dev, tlb_addr);
}
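For illustration only: a userspace sketch of the unmap ordering the hunk above establishes — device-written data is bounced back to the original buffer first, and a transient pool is then torn down as a whole instead of having its slots released individually. The struct and function names (bounce, unmap_bounce) are invented for the sketch.

/* Standalone model of the unmap ordering; not kernel code. */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct bounce {
        char data[64];          /* bounce buffer contents */
        char *orig;             /* caller's buffer */
        bool transient;         /* pool was allocated just for this mapping */
        bool from_device;       /* DMA_FROM_DEVICE / DMA_BIDIRECTIONAL */
};

static void unmap_bounce(struct bounce *b)
{
        /* 1. Copy device-written data back before anything is freed. */
        if (b->from_device)
                memcpy(b->orig, b->data, sizeof(b->data));

        /* 2. A transient pool is dropped as a whole ... */
        if (b->transient) {
                printf("transient pool deleted\n");
                return;
        }

        /* 3. ... otherwise only the slots are returned to the pool. */
        printf("slots released\n");
}

int main(void)
{
        char user_buf[64] = "";
        struct bounce b = { .orig = user_buf, .transient = true,
                            .from_device = true };

        strcpy(b.data, "data written by the device");
        unmap_bounce(&b);
        printf("caller sees: %s\n", user_buf);
        return 0;
}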

@@ -950,13 +1463,47 @@ size_t swiotlb_max_mapping_size(struct device *dev)
        return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
}

/**
 * is_swiotlb_allocated() - check if the default software IO TLB is initialized
 */
bool is_swiotlb_allocated(void)
{
        return io_tlb_default_mem.nslabs;
}

bool is_swiotlb_active(struct device *dev)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

        return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

/**
 * default_swiotlb_base() - get the base address of the default SWIOTLB
 *
 * Get the lowest physical address used by the default software IO TLB pool.
 */
phys_addr_t default_swiotlb_base(void)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
        io_tlb_default_mem.can_grow = false;
#endif
        return io_tlb_default_mem.defpool.start;
}

/**
 * default_swiotlb_limit() - get the address limit of the default SWIOTLB
 *
 * Get the highest physical address used by the default software IO TLB pool.
 */
phys_addr_t default_swiotlb_limit(void)
{
#ifdef CONFIG_SWIOTLB_DYNAMIC
        return io_tlb_default_mem.phys_limit;
#else
        return io_tlb_default_mem.defpool.end - 1;
#endif
}
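For illustration only: a sketch of the kind of check a caller might build on default_swiotlb_limit() — comparing the highest bounce-buffer address against a device's addressing limit before relying on the default SWIOTLB. The stub helpers and the concrete addresses below are invented for the sketch; they are not kernel code.

/* Standalone model; the kernel helpers are stubbed with made-up values. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t phys_addr_t;

/* Stand-ins for default_swiotlb_base()/default_swiotlb_limit(). */
static phys_addr_t stub_swiotlb_base(void)  { return 0x80000000ULL; }
static phys_addr_t stub_swiotlb_limit(void) { return 0xbfffffffULL; }

/* Can a device that addresses at most dma_mask reach the default SWIOTLB? */
static bool swiotlb_reachable(phys_addr_t dma_mask)
{
        return stub_swiotlb_base() <= dma_mask &&
               stub_swiotlb_limit() <= dma_mask;
}

int main(void)
{
        printf("32-bit device: %s\n",
               swiotlb_reachable(0xffffffffULL) ? "reachable" : "needs another buffer");
        printf("30-bit device: %s\n",
               swiotlb_reachable(0x3fffffffULL) ? "reachable" : "needs another buffer");
        return 0;
}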

#ifdef CONFIG_DEBUG_FS

@@ -1031,17 +1578,18 @@ static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
struct page *swiotlb_alloc(struct device *dev, size_t size)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        struct io_tlb_pool *pool;
        phys_addr_t tlb_addr;
        int index;

        if (!mem)
                return NULL;

        index = swiotlb_find_slots(dev, 0, size, 0);
        index = swiotlb_find_slots(dev, 0, size, 0, &pool);
        if (index == -1)
                return NULL;

        tlb_addr = slot_addr(mem->start, index);
        tlb_addr = slot_addr(pool->start, index);

        return pfn_to_page(PFN_DOWN(tlb_addr));
}
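For illustration only: a small model of the slot-index arithmetic that turns the returned index into a physical address, assuming the usual 2 KiB IO TLB slot size (IO_TLB_SHIFT == 11). The pool start address is made up, and the function here only mirrors the spirit of slot_addr(); it is not the kernel macro.

/* Standalone model of the slot-index -> physical-address arithmetic. */
#include <stdio.h>
#include <stdint.h>

#define IO_TLB_SHIFT    11                      /* assumed 2 KiB slots */
#define IO_TLB_SIZE     (1 << IO_TLB_SHIFT)

/* Each slot sits IO_TLB_SIZE bytes further past the pool start. */
static uint64_t slot_addr(uint64_t pool_start, unsigned int index)
{
        return pool_start + ((uint64_t)index << IO_TLB_SHIFT);
}

int main(void)
{
        uint64_t start = 0x80000000ULL;         /* made-up pool start */

        printf("slot 0:   %#llx\n", (unsigned long long)slot_addr(start, 0));
        printf("slot 3:   %#llx\n", (unsigned long long)slot_addr(start, 3));
        printf("slot 128: %#llx\n", (unsigned long long)slot_addr(start, 128));
        return 0;
}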
@@ -1078,29 +1626,37 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
         * to it.
         */
        if (!mem) {
                struct io_tlb_pool *pool;

                mem = kzalloc(sizeof(*mem), GFP_KERNEL);
                if (!mem)
                        return -ENOMEM;
                pool = &mem->defpool;

                mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL);
                if (!mem->slots) {
                pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL);
                if (!pool->slots) {
                        kfree(mem);
                        return -ENOMEM;
                }

                mem->areas = kcalloc(nareas, sizeof(*mem->areas),
                pool->areas = kcalloc(nareas, sizeof(*pool->areas),
                                GFP_KERNEL);
                if (!mem->areas) {
                        kfree(mem->slots);
                if (!pool->areas) {
                        kfree(pool->slots);
                        kfree(mem);
                        return -ENOMEM;
                }

                set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
                                rmem->size >> PAGE_SHIFT);
                swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
                                false, nareas);
                swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs,
                                false, nareas);
                mem->force_bounce = true;
                mem->for_alloc = true;
#ifdef CONFIG_SWIOTLB_DYNAMIC
                spin_lock_init(&mem->lock);
#endif
                add_mem_pool(mem, pool);

                rmem->priv = mem;

mm/cma.c
@@ -267,6 +267,9 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
        if (alignment && !is_power_of_2(alignment))
                return -EINVAL;

        if (!IS_ENABLED(CONFIG_NUMA))
                nid = NUMA_NO_NODE;

        /* Sanitise input arguments. */
        alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
        if (fixed && base & (alignment - 1)) {
@@ -372,14 +375,15 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
        if (ret)
                goto free_mem;

        pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
                &base);
        pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M,
                &base, nid);
        return 0;

free_mem:
        memblock_phys_free(base, size);
err:
        pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
        pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M,
               nid);
        return ret;
}
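For illustration only: a userspace sketch of the two changes in this hunk — the node id collapses to NUMA_NO_NODE (-1) when NUMA support is compiled out, and the reservation messages then carry that node id. The sizes, addresses and the boolean standing in for CONFIG_NUMA are made up for the example.

/* Standalone model of the nid handling; not kernel code. */
#include <stdio.h>
#include <stdbool.h>

#define NUMA_NO_NODE    (-1)
#define SZ_1M           (1024UL * 1024)

static void declare_contiguous(unsigned long long base, unsigned long size,
                               int nid, bool numa_enabled)
{
        /* Without CONFIG_NUMA every request is treated as node-less. */
        if (!numa_enabled)
                nid = NUMA_NO_NODE;

        printf("Reserved %lu MiB at %#llx on node %d\n",
               size / SZ_1M, base, nid);
}

int main(void)
{
        declare_contiguous(0x100000000ULL, 64 * SZ_1M, 1, true);
        declare_contiguous(0x100000000ULL, 64 * SZ_1M, 1, false);
        return 0;
}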

mm/slab_common.c
@@ -895,10 +895,9 @@ void __init setup_kmalloc_cache_index_table(void)

static unsigned int __kmalloc_minalign(void)
{
#ifdef CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC
        if (io_tlb_default_mem.nslabs)
        if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
            is_swiotlb_allocated())
                return ARCH_KMALLOC_MINALIGN;
#endif
        return dma_get_cache_alignment();
}
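For illustration only: a userspace sketch of the alignment decision __kmalloc_minalign() makes after this change — keep the small architectural kmalloc alignment only if unaligned kmalloc buffers can be bounced through an allocated SWIOTLB, otherwise fall back to the DMA cache alignment. The concrete values (8 and 128 bytes) are assumptions in the spirit of arm64 and only serve the example.

/* Standalone model of the kmalloc minimum-alignment decision. */
#include <stdio.h>
#include <stdbool.h>

#define ARCH_KMALLOC_MINALIGN   8       /* assumed, arm64-style */
#define DMA_CACHE_ALIGN         128     /* assumed dma_get_cache_alignment() */

static unsigned int kmalloc_minalign(bool bounce_unaligned_kmalloc,
                                     bool swiotlb_allocated)
{
        /* Small kmalloc caches are only safe if unaligned DMA can be bounced. */
        if (bounce_unaligned_kmalloc && swiotlb_allocated)
                return ARCH_KMALLOC_MINALIGN;
        return DMA_CACHE_ALIGN;
}

int main(void)
{
        printf("with swiotlb:    %u\n", kmalloc_minalign(true, true));
        printf("without swiotlb: %u\n", kmalloc_minalign(true, false));
        return 0;
}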