dma-mapping fixes for Linux 5.19
- fix a regression in setting swiotlb ->force_bounce (me)
- make dma-debug less chatty (Rob Clark)

-----BEGIN PGP SIGNATURE-----

iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAmKdoHMLHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYOzIw/9FHEZpid2sn1eL9mkJIwl+OlM3lDXg2VmJ4xUto7V
5ha17ftGO5P005P7YuPHsGgtdAA6Lg8Q3LtbZ+HkUnb04ApOhnDhtqo9KqT/ylGL
PTAzOcQPdTFJTElU9ijDCH2LlzgOWK1qcsnlnDRV8X9iCh1SFoU0ByGtjZC6e3sG
g5cXHkdT91ONiD3vdh/puzZ43VpWIW2YUoumtmbf5WtmjEKSyfPjkks4Wb/KSPxQ
wJdLkgzBdGzMg9Sr6bH2JUMjXnKRkWvHxa7m47ybmeC1s7HTVxlRh9lUEEC/suIo
1+i/zsV6Tbif3ixu2Anxtk+x3aY2NwW5FDTU86wNHw5vYyO/FCaBCOBvo+d+GZB7
xNARWh9V6r/HwTYflzgBlwUCfsnIWFZVZ+Q8xm1zVuYuoVVnzjl6yidHL1sVWf3H
0w+aA63ivMTtsQOfzG9rEbYvtPOFU+Z84SgHqHgq7GxaxGy3xyiVXYdKznKt4QY7
HmKdzh0XSOu6roj2pNlT3F1xlp47J3DOtFBZ2kQ3yw4bhLHGHSr4+GX2FPnxdSTC
zrfG0ODDLfRTi/TkfjjDM2/f7HrQPWmutfBwyNwj/0vG/UT4XWSjo1HEcdghR5Hm
J0/zD2elkNHX/ysHs52oLoKuLB1EzqG9Fmwl7RV8bNMhf4X0QtAGfLon+rc0SuuS
P4I=
=JdkI
-----END PGP SIGNATURE-----

Merge tag 'dma-mapping-5.19-2022-06-06' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - fix a regression in setting swiotlb ->force_bounce (me)

 - make dma-debug less chatty (Rob Clark)

* tag 'dma-mapping-5.19-2022-06-06' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: fix setting ->force_bounce
  dma-debug: make things less spammy under memory pressure
commit e71e60cd74
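For orientation before the diff: the ->force_bounce regression came from each swiotlb setup path assigning mem->force_bounce by hand after calling swiotlb_init_io_tlb_mem(), which the 5.19 swiotlb_init rework left inconsistent. The fix threads a flags argument into the helper and computes the value once. Below is a standalone model of the fixed decision, compilable outside the kernel; the SWIOTLB_FORCE bit value and the helper name decide_force_bounce() are illustrative assumptions, not upstream code.

/* Standalone model (not kernel code) of the fixed ->force_bounce logic:
 * bouncing is forced when either the global swiotlb=force boot parameter
 * was given or the caller passed SWIOTLB_FORCE. */
#include <stdbool.h>
#include <stdio.h>

#define SWIOTLB_FORCE (1u << 1)   /* bit value assumed for illustration */

static bool swiotlb_force_bounce; /* stands in for the swiotlb=force boot knob */

static bool decide_force_bounce(unsigned int flags)
{
	/* the single expression the patch moves into swiotlb_init_io_tlb_mem() */
	return swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
}

int main(void)
{
	printf("%d\n", decide_force_bounce(SWIOTLB_FORCE)); /* 1: caller forces bouncing */
	printf("%d\n", decide_force_bounce(0));             /* 0: no flag, no global knob */
	swiotlb_force_bounce = true;
	printf("%d\n", decide_force_bounce(0));             /* 1: the global knob still applies */
	return 0;
}

Centralizing the assignment also means a missed call site is now a compile error (the helper gained a parameter) rather than a silently un-bounced buffer.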
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -564,7 +564,7 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 
 	rc = active_cacheline_insert(entry);
 	if (rc == -ENOMEM) {
-		pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
+		pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
 		global_disable = true;
 	} else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
 		err_printk(entry->dev, entry,
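The dma-debug hunk above swaps pr_err() for pr_err_once(), so a persistent cacheline-tracking allocation failure logs one line instead of one line per DMA mapping. As a hedged illustration, here is a simplified userspace stand-in for the print-once pattern; the real macro lives in include/linux/printk.h and builds on printk_once(), so this only mimics the behavior:

/* Simplified stand-in for pr_err_once(): a function-local static flag
 * makes repeated invocations emit the message at most once. */
#include <stdbool.h>
#include <stdio.h>

#define pr_err_once_sketch(fmt, ...)                       \
	do {                                               \
		static bool printed;                       \
		if (!printed) {                            \
			printed = true;                    \
			fprintf(stderr, fmt, ##__VA_ARGS__); \
		}                                          \
	} while (0)

int main(void)
{
	for (int i = 0; i < 3; i++)
		pr_err_once_sketch("cacheline tracking ENOMEM, dma-debug disabled\n");
	/* the message appears exactly once, however hot the error path gets */
	return 0;
}

Under memory pressure the ENOMEM branch can be hit on every mapping attempt, which is exactly the spam the patch removes; global_disable is still set, so dma-debug switches itself off either way.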
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -192,7 +192,7 @@ void __init swiotlb_update_mem_attributes(void)
 }
 
 static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
-		unsigned long nslabs, bool late_alloc)
+		unsigned long nslabs, unsigned int flags, bool late_alloc)
 {
 	void *vaddr = phys_to_virt(start);
 	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
@@ -203,8 +203,7 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
 	mem->index = 0;
 	mem->late_alloc = late_alloc;
 
-	if (swiotlb_force_bounce)
-		mem->force_bounce = true;
+	mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
 
 	spin_lock_init(&mem->lock);
 	for (i = 0; i < mem->nslabs; i++) {
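The hunk above is the core of the force_bounce fix: the decision now lives inside swiotlb_init_io_tlb_mem() itself, so the three call sites in the remaining hunks only differ in which flags they pass — the caller's flags in swiotlb_init_remap(), 0 in swiotlb_init_late(), and SWIOTLB_FORCE for restricted-DMA pools in rmem_swiotlb_device_init().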
@@ -275,8 +274,7 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
 		      __func__, alloc_size, PAGE_SIZE);
 
-	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
-	mem->force_bounce = flags & SWIOTLB_FORCE;
+	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false);
 
 	if (flags & SWIOTLB_VERBOSE)
 		swiotlb_print_info();
@@ -348,7 +346,7 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,
 
 	set_memory_decrypted((unsigned long)vstart,
 			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
-	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);
+	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true);
 
 	swiotlb_print_info();
 	return 0;
@@ -835,8 +833,8 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
 
 		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
 				     rmem->size >> PAGE_SHIFT);
-		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
-		mem->force_bounce = true;
+		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
+					false);
 		mem->for_alloc = true;
 
 		rmem->priv = mem;