commit 327e2c97c4

Currently swiotlb_tbl_map_single() takes alloc_align_mask and alloc_size arguments to specify an swiotlb allocation that is larger than mapping_size. This larger allocation is used solely by iommu_dma_map_single() to handle untrusted devices that should not have DMA visibility to memory pages that are partially used for unrelated kernel data.

Having two arguments to specify the allocation is redundant. While alloc_align_mask naturally specifies the alignment of the starting address of the allocation, it can also implicitly specify the size by rounding up the mapping_size to that alignment.

Additionally, the current approach has an edge case bug. iommu_dma_map_page() already does the rounding up to compute the alloc_size argument. But swiotlb_tbl_map_single() then calculates the alignment offset based on the DMA min_align_mask, and adds that offset to alloc_size. If the offset is non-zero, the addition may result in a value that is larger than the max the swiotlb can allocate. If the rounding up is done _after_ the alignment offset is added to the mapping_size (and the original mapping_size conforms to the value returned by swiotlb_max_mapping_size), then the max that the swiotlb can allocate will not be exceeded.

In view of these issues, simplify the swiotlb_tbl_map_single() interface by removing the alloc_size argument. Most call sites pass the same value for mapping_size and alloc_size, and they pass alloc_align_mask as zero. Just remove the redundant argument from these callers, as they will see no functional change. For iommu_dma_map_page() also remove the alloc_size argument, and have swiotlb_tbl_map_single() compute the alloc_size by rounding up mapping_size after adding the offset based on min_align_mask. This has the side effect of fixing the edge case bug but with no other functional change.

Also add a sanity test on the alloc_align_mask. While IOMMU code currently ensures the granule is not larger than PAGE_SIZE, if that guarantee were to be removed in the future, the downstream effect on the swiotlb might go unnoticed until strange allocation failures occurred.

Tested on an ARM64 system with 16K page size and some kernel test-only hackery to allow modifying the DMA min_align_mask and the granule size that becomes the alloc_align_mask. Tested these combinations with a variety of original memory addresses and sizes, including those that reproduce the edge case bug:

* 4K granule and 0 min_align_mask
* 4K granule and 0xFFF min_align_mask (4K - 1)
* 16K granule and 0xFFF min_align_mask
* 64K granule and 0xFFF min_align_mask
* 64K granule and 0x3FFF min_align_mask (16K - 1)

With the changes, all combinations pass.

Signed-off-by: Michael Kelley <mhklinux@outlook.com>
Reviewed-by: Petr Tesarik <petr@tesarici.cz>
Signed-off-by: Christoph Hellwig <hch@lst.de>
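The size computation described above can be sketched in a few lines. This is an illustration only, not the kernel's code: the helper name is hypothetical, the real logic is internal to swiotlb_tbl_map_single() in kernel/dma/swiotlb.c, and ALIGN() is the kernel's power-of-two round-up macro from <linux/align.h>.

/*
 * Illustrative only: a sketch of the alloc_size computation described in
 * the commit message. The helper name is hypothetical; the real computation
 * is internal to swiotlb_tbl_map_single().
 */
static size_t sketch_alloc_size(size_t mapping_size, phys_addr_t orig_addr,
                                unsigned int min_align_mask,
                                unsigned int alloc_align_mask)
{
        /* Offset of the original buffer within a min_align_mask granule. */
        unsigned int offset = orig_addr & min_align_mask;

        /*
         * Old scheme: the caller rounded mapping_size up to the granule
         * first, and swiotlb_tbl_map_single() added the offset afterwards,
         * which could exceed the swiotlb maximum when offset != 0.
         *
         * New scheme: add the offset first, then round up once, so a
         * mapping_size that respects swiotlb_max_mapping_size() cannot
         * push the allocation past the limit.
         */
        return ALIGN(mapping_size + offset, alloc_align_mask + 1);
}

Since IO_TLB_SHIFT is 11 and IO_TLB_SEGSIZE is 128 (see the header below), a contiguous swiotlb allocation tops out at 2 KiB * 128 = 256 KiB; rounding a near-maximum mapping_size up to the granule before adding a non-zero offset is what could push a request past that limit.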
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct device;
struct page;
struct scatterlist;

#define SWIOTLB_VERBOSE (1 << 0) /* verbose initialization */
#define SWIOTLB_FORCE   (1 << 1) /* force bounce buffering */
#define SWIOTLB_ANY     (1 << 2) /* allow any memory for the buffer */

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2. What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE 128

/*
 * log of the size of each IO TLB slab. The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)

unsigned long swiotlb_size_or_default(void);
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
        int (*remap)(void *tlb, unsigned long nslabs));
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
        int (*remap)(void *tlb, unsigned long nslabs));
extern void __init swiotlb_update_mem_attributes(void);

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
                size_t mapping_size,
                unsigned int alloc_aligned_mask, enum dma_data_direction dir,
                unsigned long attrs);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
                                     phys_addr_t tlb_addr,
                                     size_t mapping_size,
                                     enum dma_data_direction dir,
                                     unsigned long attrs);

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir);
void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir);
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs);

#ifdef CONFIG_SWIOTLB

/**
 * struct io_tlb_pool - IO TLB memory pool descriptor
 * @start:      The start address of the swiotlb memory pool. Used to do a quick
 *              range check to see if the memory was in fact allocated by this
 *              API.
 * @end:        The end address of the swiotlb memory pool. Used to do a quick
 *              range check to see if the memory was in fact allocated by this
 *              API.
 * @vaddr:      The vaddr of the swiotlb memory pool. The swiotlb memory pool
 *              may be remapped in the memory encrypted case and store virtual
 *              address for bounce buffer operation.
 * @nslabs:     The number of IO TLB slots between @start and @end. For the
 *              default swiotlb, this can be adjusted with a boot parameter,
 *              see setup_io_tlb_npages().
 * @late_alloc: %true if allocated using the page allocator.
 * @nareas:     Number of areas in the pool.
 * @area_nslabs: Number of slots in each area.
 * @areas:      Array of memory area descriptors.
 * @slots:      Array of slot descriptors.
 * @node:       Member of the IO TLB memory pool list.
 * @rcu:        RCU head for swiotlb_dyn_free().
 * @transient:  %true if transient memory pool.
 */
struct io_tlb_pool {
        phys_addr_t start;
        phys_addr_t end;
        void *vaddr;
        unsigned long nslabs;
        bool late_alloc;
        unsigned int nareas;
        unsigned int area_nslabs;
        struct io_tlb_area *areas;
        struct io_tlb_slot *slots;
#ifdef CONFIG_SWIOTLB_DYNAMIC
        struct list_head node;
        struct rcu_head rcu;
        bool transient;
#endif
};

/**
 * struct io_tlb_mem - Software IO TLB allocator
 * @defpool:    Default (initial) IO TLB memory pool descriptor.
 * @pool:       IO TLB memory pool descriptor (if not dynamic).
 * @nslabs:     Total number of IO TLB slabs in all pools.
 * @debugfs:    The dentry to debugfs.
 * @force_bounce: %true if swiotlb bouncing is forced
 * @for_alloc:  %true if the pool is used for memory allocation
 * @can_grow:   %true if more pools can be allocated dynamically.
 * @phys_limit: Maximum allowed physical address.
 * @lock:       Lock to synchronize changes to the list.
 * @pools:      List of IO TLB memory pool descriptors (if dynamic).
 * @dyn_alloc:  Dynamic IO TLB pool allocation work.
 * @total_used: The total number of slots in the pool that are currently used
 *              across all areas. Used only for calculating used_hiwater in
 *              debugfs.
 * @used_hiwater: The high water mark for total_used. Used only for reporting
 *              in debugfs.
 * @transient_nslabs: The total number of slots in all transient pools that
 *              are currently used across all areas.
 */
struct io_tlb_mem {
        struct io_tlb_pool defpool;
        unsigned long nslabs;
        struct dentry *debugfs;
        bool force_bounce;
        bool for_alloc;
#ifdef CONFIG_SWIOTLB_DYNAMIC
        bool can_grow;
        u64 phys_limit;
        spinlock_t lock;
        struct list_head pools;
        struct work_struct dyn_alloc;
#endif
#ifdef CONFIG_DEBUG_FS
        atomic_long_t total_used;
        atomic_long_t used_hiwater;
        atomic_long_t transient_nslabs;
#endif
};

#ifdef CONFIG_SWIOTLB_DYNAMIC

struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr);

#else

static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
                                                    phys_addr_t paddr)
{
        return &dev->dma_io_tlb_mem->defpool;
}

#endif

/**
 * is_swiotlb_buffer() - check if a physical address belongs to a swiotlb
 * @dev:        Device which has mapped the buffer.
 * @paddr:      Physical address within the DMA buffer.
 *
 * Check if @paddr points into a bounce buffer.
 *
 * Return:
 * * %true if @paddr points into a bounce buffer
 * * %false otherwise
 */
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

        if (!mem)
                return false;

#ifdef CONFIG_SWIOTLB_DYNAMIC
        /*
         * All SWIOTLB buffer addresses must have been returned by
         * swiotlb_tbl_map_single() and passed to a device driver.
         * If a SWIOTLB address is checked on another CPU, then it was
         * presumably loaded by the device driver from an unspecified private
         * data structure. Make sure that this load is ordered before reading
         * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool().
         *
         * This barrier pairs with smp_mb() in swiotlb_find_slots().
         */
        smp_rmb();
        return READ_ONCE(dev->dma_uses_io_tlb) &&
                swiotlb_find_pool(dev, paddr);
#else
        return paddr >= mem->defpool.start && paddr < mem->defpool.end;
#endif
}

static inline bool is_swiotlb_force_bounce(struct device *dev)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

        return mem && mem->force_bounce;
}

void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
void swiotlb_dev_init(struct device *dev);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_allocated(void);
bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
phys_addr_t default_swiotlb_base(void);
phys_addr_t default_swiotlb_limit(void);
#else
static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
{
}

static inline void swiotlb_dev_init(struct device *dev)
{
}

static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
        return false;
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
{
        return false;
}
static inline void swiotlb_exit(void)
{
}
static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
        return SIZE_MAX;
}

static inline bool is_swiotlb_allocated(void)
{
        return false;
}

static inline bool is_swiotlb_active(struct device *dev)
{
        return false;
}

static inline void swiotlb_adjust_size(unsigned long size)
{
}

static inline phys_addr_t default_swiotlb_base(void)
{
        return 0;
}

static inline phys_addr_t default_swiotlb_limit(void)
{
        return 0;
}
#endif /* CONFIG_SWIOTLB */

extern void swiotlb_print_info(void);

#ifdef CONFIG_DMA_RESTRICTED_POOL
struct page *swiotlb_alloc(struct device *dev, size_t size);
bool swiotlb_free(struct device *dev, struct page *page, size_t size);

static inline bool is_swiotlb_for_alloc(struct device *dev)
{
        return dev->dma_io_tlb_mem->for_alloc;
}
#else
static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
{
        return NULL;
}
static inline bool swiotlb_free(struct device *dev, struct page *page,
                                size_t size)
{
        return false;
}
static inline bool is_swiotlb_for_alloc(struct device *dev)
{
        return false;
}
#endif /* CONFIG_DMA_RESTRICTED_POOL */

#endif /* __LINUX_SWIOTLB_H */
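For context, a minimal sketch of what a typical call site looks like with the simplified interface declared above. The function name is hypothetical and the body only loosely follows the pattern of swiotlb_map() in kernel/dma/swiotlb.c; it assumes DMA_MAPPING_ERROR from <linux/dma-mapping.h> and phys_to_dma() from <linux/dma-direct.h>. Most callers need no extra allocation alignment, so alloc_align_mask is simply 0.

/* Illustrative caller sketch; not part of this header. */
static dma_addr_t example_bounce_map(struct device *dev, phys_addr_t paddr,
                                     size_t size, enum dma_data_direction dir,
                                     unsigned long attrs)
{
        phys_addr_t tlb_addr;

        /* No separate alloc_size argument; alloc_align_mask of 0 suffices. */
        tlb_addr = swiotlb_tbl_map_single(dev, paddr, size, 0, dir, attrs);
        if (tlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;

        return phys_to_dma(dev, tlb_addr);
}

Callers that do need a larger, aligned bounce allocation (the untrusted-device path in the IOMMU code) pass the IOVA granule mask instead of 0 and let swiotlb_tbl_map_single() derive the allocation size itself, as described in the commit message.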