From 67a540c60c39d052d9599aa9909023152200a707 Mon Sep 17 00:00:00 2001 From: Tony Battersby Date: Thu, 26 Jan 2023 13:51:15 -0800 Subject: [PATCH 01/10] dmapool: remove checks for dev == NULL dmapool originally tried to support pools without a device because dma_alloc_coherent() supports allocations without a device. But nobody ended up using dma pools without a device, and trying to do so will result in an oops. So remove the checks for pool->dev == NULL since they are unneeded bloat. [kbusch@kernel.org: add check for null dev on create] Link: https://lkml.kernel.org/r/20230126215125.4069751-3-kbusch@meta.com Fixes: 2d55c16c0c54 ("dmapool: create/destroy cleanup") Signed-off-by: Tony Battersby Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Cc: Matthew Wilcox Signed-off-by: Andrew Morton --- mm/dmapool.c | 45 ++++++++++++++------------------------------- 1 file changed, 14 insertions(+), 31 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index a7eb5d0eb2da..559207e1c333 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -134,6 +134,9 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, size_t allocation; bool empty = false; + if (!dev) + return NULL; + if (align == 0) align = 1; else if (align & (align - 1)) @@ -275,7 +278,7 @@ void dma_pool_destroy(struct dma_pool *pool) mutex_lock(&pools_reg_lock); mutex_lock(&pools_lock); list_del(&pool->pools); - if (pool->dev && list_empty(&pool->dev->dma_pools)) + if (list_empty(&pool->dev->dma_pools)) empty = true; mutex_unlock(&pools_lock); if (empty) @@ -284,12 +287,8 @@ void dma_pool_destroy(struct dma_pool *pool) list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) { if (is_page_busy(page)) { - if (pool->dev) - dev_err(pool->dev, "%s %s, %p busy\n", __func__, - pool->name, page->vaddr); - else - pr_err("%s %s, %p busy\n", __func__, - pool->name, page->vaddr); + dev_err(pool->dev, "%s %s, %p busy\n", __func__, + pool->name, page->vaddr); /* leak the still-in-use consistent memory */ list_del(&page->page_list); kfree(page); @@ -351,12 +350,8 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, for (i = sizeof(page->offset); i < pool->size; i++) { if (data[i] == POOL_POISON_FREED) continue; - if (pool->dev) - dev_err(pool->dev, "%s %s, %p (corrupted)\n", - __func__, pool->name, retval); - else - pr_err("%s %s, %p (corrupted)\n", - __func__, pool->name, retval); + dev_err(pool->dev, "%s %s, %p (corrupted)\n", + __func__, pool->name, retval); /* * Dump the first 4 bytes even if they are not @@ -411,12 +406,8 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) page = pool_find_page(pool, dma); if (!page) { spin_unlock_irqrestore(&pool->lock, flags); - if (pool->dev) - dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n", - __func__, pool->name, vaddr, &dma); - else - pr_err("%s %s, %p/%pad (bad dma)\n", - __func__, pool->name, vaddr, &dma); + dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n", + __func__, pool->name, vaddr, &dma); return; } @@ -426,12 +417,8 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) #ifdef DMAPOOL_DEBUG if ((dma - page->dma) != offset) { spin_unlock_irqrestore(&pool->lock, flags); - if (pool->dev) - dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n", - __func__, pool->name, vaddr, &dma); - else - pr_err("%s %s, %p (bad vaddr)/%pad\n", - __func__, pool->name, vaddr, &dma); + dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n", + __func__, pool->name, vaddr, &dma); return; } { @@ -442,12 +429,8 @@ void dma_pool_free(struct dma_pool 
*pool, void *vaddr, dma_addr_t dma) continue; } spin_unlock_irqrestore(&pool->lock, flags); - if (pool->dev) - dev_err(pool->dev, "%s %s, dma %pad already free\n", - __func__, pool->name, &dma); - else - pr_err("%s %s, dma %pad already free\n", - __func__, pool->name, &dma); + dev_err(pool->dev, "%s %s, dma %pad already free\n", + __func__, pool->name, &dma); return; } } From 08cc96c894848bcd1d15a79b15c56a8bb4f07ff5 Mon Sep 17 00:00:00 2001 From: Tony Battersby Date: Thu, 26 Jan 2023 13:51:16 -0800 Subject: [PATCH 02/10] dmapool: use sysfs_emit() instead of scnprintf() Use sysfs_emit instead of scnprintf, snprintf or sprintf. Link: https://lkml.kernel.org/r/20230126215125.4069751-4-kbusch@meta.com Fixes: 2d55c16c0c54 ("dmapool: create/destroy cleanup") Signed-off-by: Tony Battersby Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Cc: Matthew Wilcox Signed-off-by: Andrew Morton --- mm/dmapool.c | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index 559207e1c333..20616b760bb9 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -64,18 +64,11 @@ static DEFINE_MUTEX(pools_reg_lock); static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf) { - unsigned temp; - unsigned size; - char *next; + int size; struct dma_page *page; struct dma_pool *pool; - next = buf; - size = PAGE_SIZE; - - temp = scnprintf(next, size, "poolinfo - 0.1\n"); - size -= temp; - next += temp; + size = sysfs_emit(buf, "poolinfo - 0.1\n"); mutex_lock(&pools_lock); list_for_each_entry(pool, &dev->dma_pools, pools) { @@ -90,16 +83,14 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha spin_unlock_irq(&pool->lock); /* per-pool info, no real statistics yet */ - temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n", - pool->name, blocks, - pages * (pool->allocation / pool->size), - pool->size, pages); - size -= temp; - next += temp; + size += sysfs_emit_at(buf, size, "%-16s %4u %4zu %4zu %2u\n", + pool->name, blocks, + pages * (pool->allocation / pool->size), + pool->size, pages); } mutex_unlock(&pools_lock); - return PAGE_SIZE - size; + return size; } static DEVICE_ATTR_RO(pools); From 790233528d338f1467662761cf1e871086483ab8 Mon Sep 17 00:00:00 2001 From: Tony Battersby Date: Thu, 26 Jan 2023 13:51:17 -0800 Subject: [PATCH 03/10] dmapool: cleanup integer types To represent the size of a single allocation, dmapool currently uses 'unsigned int' in some places and 'size_t' in other places. Standardize on 'unsigned int' to reduce overhead, but use 'size_t' when counting all the blocks in the entire pool. 
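The overflow being avoided is easiest to see in the sysfs accounting; as a minimal sketch of the arithmetic (variable names follow pools_show(); the cast is the point):

    unsigned pages = 0;    /* pages in one pool: fits an unsigned int */
    size_t blocks = 0;     /* blocks across the whole pool: kept in size_t */

    /* cast before multiplying so the product is computed at size_t
     * width rather than as a 32-bit int that could wrap for a large pool */
    size_t capacity = (size_t)pages * (pool->allocation / pool->size);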
Link: https://lkml.kernel.org/r/20230126215125.4069751-5-kbusch@meta.com Fixes: 2d55c16c0c54 ("dmapool: create/destroy cleanup") Signed-off-by: Tony Battersby Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Cc: Matthew Wilcox Signed-off-by: Andrew Morton --- mm/dmapool.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index 20616b760bb9..ee993bb59fc2 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -43,10 +43,10 @@ struct dma_pool { /* the pool */ struct list_head page_list; spinlock_t lock; - size_t size; struct device *dev; - size_t allocation; - size_t boundary; + unsigned int size; + unsigned int allocation; + unsigned int boundary; char name[32]; struct list_head pools; }; @@ -73,7 +73,7 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha mutex_lock(&pools_lock); list_for_each_entry(pool, &dev->dma_pools, pools) { unsigned pages = 0; - unsigned blocks = 0; + size_t blocks = 0; spin_lock_irq(&pool->lock); list_for_each_entry(page, &pool->page_list, page_list) { @@ -83,9 +83,10 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha spin_unlock_irq(&pool->lock); /* per-pool info, no real statistics yet */ - size += sysfs_emit_at(buf, size, "%-16s %4u %4zu %4zu %2u\n", + size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2u\n", pool->name, blocks, - pages * (pool->allocation / pool->size), + (size_t) pages * + (pool->allocation / pool->size), pool->size, pages); } mutex_unlock(&pools_lock); @@ -133,7 +134,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, else if (align & (align - 1)) return NULL; - if (size == 0) + if (size == 0 || size > INT_MAX) return NULL; else if (size < 4) size = 4; @@ -146,6 +147,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, else if ((boundary < size) || (boundary & (boundary - 1))) return NULL; + boundary = min(boundary, allocation); + retval = kmalloc(sizeof(*retval), GFP_KERNEL); if (!retval) return retval; @@ -306,7 +309,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, { unsigned long flags; struct dma_page *page; - size_t offset; + unsigned int offset; void *retval; might_alloc(mem_flags); From 290911c56f98ac7af9354108f3d16da5d6c5189c Mon Sep 17 00:00:00 2001 From: Tony Battersby Date: Thu, 26 Jan 2023 13:51:18 -0800 Subject: [PATCH 04/10] dmapool: speedup DMAPOOL_DEBUG with init_on_alloc Avoid double-memset of the same allocated memory in dma_pool_alloc() when both DMAPOOL_DEBUG is enabled and init_on_alloc=1. 
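The resulting flow in dma_pool_alloc(), as a sketch (want_init_on_free()'s counterpart want_init_on_alloc() returns true for __GFP_ZERO and for init_on_alloc=1):

    /* debug build: skip the poison fill when the block is zeroed below */
    if (!want_init_on_alloc(mem_flags))
        memset(retval, POOL_POISON_ALLOCATED, pool->size);
    ...
    /* one init instead of poison-then-zero */
    if (want_init_on_alloc(mem_flags))
        memset(retval, 0, pool->size);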
Link: https://lkml.kernel.org/r/20230126215125.4069751-6-kbusch@meta.com Fixes: 2d55c16c0c54 ("dmapool: create/destroy cleanup") Signed-off-by: Tony Battersby Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Cc: Matthew Wilcox Signed-off-by: Andrew Morton --- mm/dmapool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index ee993bb59fc2..eaed3ffb42aa 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -356,7 +356,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, break; } } - if (!(mem_flags & __GFP_ZERO)) + if (!want_init_on_alloc(mem_flags)) memset(retval, POOL_POISON_ALLOCATED, pool->size); #endif spin_unlock_irqrestore(&pool->lock, flags); From d93e08b7556fcd393b7fd1eb421cb44e5fae314c Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 26 Jan 2023 13:51:19 -0800 Subject: [PATCH 05/10] dmapool: move debug code to own functions Clean up the normal path by moving the debug code outside it. Link: https://lkml.kernel.org/r/20230126215125.4069751-7-kbusch@meta.com Fixes: 2d55c16c0c54 ("dmapool: create/destroy cleanup") Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Cc: Matthew Wilcox Cc: Tony Battersby Signed-off-by: Andrew Morton --- mm/dmapool.c | 128 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 77 insertions(+), 51 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index eaed3ffb42aa..30b069e99996 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -96,6 +96,78 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha static DEVICE_ATTR_RO(pools); +#ifdef DMAPOOL_DEBUG +static void pool_check_block(struct dma_pool *pool, void *retval, + unsigned int offset, gfp_t mem_flags) +{ + int i; + u8 *data = retval; + /* page->offset is stored in first 4 bytes */ + for (i = sizeof(offset); i < pool->size; i++) { + if (data[i] == POOL_POISON_FREED) + continue; + dev_err(pool->dev, "%s %s, %p (corrupted)\n", + __func__, pool->name, retval); + + /* + * Dump the first 4 bytes even if they are not + * POOL_POISON_FREED + */ + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, + data, pool->size, 1); + break; + } + if (!want_init_on_alloc(mem_flags)) + memset(retval, POOL_POISON_ALLOCATED, pool->size); +} + +static bool pool_page_err(struct dma_pool *pool, struct dma_page *page, + void *vaddr, dma_addr_t dma) +{ + unsigned int offset = vaddr - page->vaddr; + unsigned int chain = page->offset; + + if ((dma - page->dma) != offset) { + dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n", + __func__, pool->name, vaddr, &dma); + return true; + } + + while (chain < pool->allocation) { + if (chain != offset) { + chain = *(int *)(page->vaddr + chain); + continue; + } + dev_err(pool->dev, "%s %s, dma %pad already free\n", + __func__, pool->name, &dma); + return true; + } + memset(vaddr, POOL_POISON_FREED, pool->size); + return false; +} + +static void pool_init_page(struct dma_pool *pool, struct dma_page *page) +{ + memset(page->vaddr, POOL_POISON_FREED, pool->allocation); +} +#else +static void pool_check_block(struct dma_pool *pool, void *retval, + unsigned int offset, gfp_t mem_flags) + +{ +} + +static bool pool_page_err(struct dma_pool *pool, struct dma_page *page, + void *vaddr, dma_addr_t dma) +{ + return false; +} + +static void pool_init_page(struct dma_pool *pool, struct dma_page *page) +{ +} +#endif + /** * dma_pool_create - Creates a pool of consistent memory blocks, for dma. 
* @name: name of pool, for diagnostics @@ -223,9 +295,7 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, &page->dma, mem_flags); if (page->vaddr) { -#ifdef DMAPOOL_DEBUG - memset(page->vaddr, POOL_POISON_FREED, pool->allocation); -#endif + pool_init_page(pool, page); pool_initialise_page(pool, page); page->in_use = 0; page->offset = 0; @@ -245,9 +315,7 @@ static void pool_free_page(struct dma_pool *pool, struct dma_page *page) { dma_addr_t dma = page->dma; -#ifdef DMAPOOL_DEBUG - memset(page->vaddr, POOL_POISON_FREED, pool->allocation); -#endif + pool_init_page(pool, page); dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma); list_del(&page->page_list); kfree(page); @@ -336,29 +404,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, page->offset = *(int *)(page->vaddr + offset); retval = offset + page->vaddr; *handle = offset + page->dma; -#ifdef DMAPOOL_DEBUG - { - int i; - u8 *data = retval; - /* page->offset is stored in first 4 bytes */ - for (i = sizeof(page->offset); i < pool->size; i++) { - if (data[i] == POOL_POISON_FREED) - continue; - dev_err(pool->dev, "%s %s, %p (corrupted)\n", - __func__, pool->name, retval); - - /* - * Dump the first 4 bytes even if they are not - * POOL_POISON_FREED - */ - print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, - data, pool->size, 1); - break; - } - } - if (!want_init_on_alloc(mem_flags)) - memset(retval, POOL_POISON_ALLOCATED, pool->size); -#endif + pool_check_block(pool, retval, offset, mem_flags); spin_unlock_irqrestore(&pool->lock, flags); if (want_init_on_alloc(mem_flags)) @@ -394,7 +440,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) { struct dma_page *page; unsigned long flags; - unsigned int offset; spin_lock_irqsave(&pool->lock, flags); page = pool_find_page(pool, dma); @@ -405,35 +450,16 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) return; } - offset = vaddr - page->vaddr; if (want_init_on_free()) memset(vaddr, 0, pool->size); -#ifdef DMAPOOL_DEBUG - if ((dma - page->dma) != offset) { + if (pool_page_err(pool, page, vaddr, dma)) { spin_unlock_irqrestore(&pool->lock, flags); - dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n", - __func__, pool->name, vaddr, &dma); return; } - { - unsigned int chain = page->offset; - while (chain < pool->allocation) { - if (chain != offset) { - chain = *(int *)(page->vaddr + chain); - continue; - } - spin_unlock_irqrestore(&pool->lock, flags); - dev_err(pool->dev, "%s %s, dma %pad already free\n", - __func__, pool->name, &dma); - return; - } - } - memset(vaddr, POOL_POISON_FREED, pool->size); -#endif page->in_use--; *(int *)vaddr = page->offset; - page->offset = offset; + page->offset = vaddr - page->vaddr; /* * Resist a temptation to do * if (!is_page_busy(page)) pool_free_page(pool, page); From 5407df10e5754b80d43697a75484bacda048fef8 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 26 Jan 2023 13:51:20 -0800 Subject: [PATCH 06/10] dmapool: rearrange page alloc failure handling Handle the error in a condition so the good path can be in the normal flow. 
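The shape of pool_alloc_page() after the change:

    page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
                                     &page->dma, mem_flags);
    if (!page->vaddr) {
        kfree(page);    /* undo the earlier kmalloc and bail out */
        return NULL;
    }
    /* the good path continues unindented from here */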
Link: https://lkml.kernel.org/r/20230126215125.4069751-8-kbusch@meta.com Fixes: 2d55c16c0c54 ("dmapool: create/destroy cleanup") Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Cc: Matthew Wilcox Cc: Tony Battersby Signed-off-by: Andrew Morton --- mm/dmapool.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index 30b069e99996..900f2afa363a 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -292,17 +292,19 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) page = kmalloc(sizeof(*page), mem_flags); if (!page) return NULL; + page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation, &page->dma, mem_flags); - if (page->vaddr) { - pool_init_page(pool, page); - pool_initialise_page(pool, page); - page->in_use = 0; - page->offset = 0; - } else { + if (!page->vaddr) { kfree(page); - page = NULL; + return NULL; } + + pool_init_page(pool, page); + pool_initialise_page(pool, page); + page->in_use = 0; + page->offset = 0; + return page; } From f0bccea6bc0caa0db9c68d28123f242a6295c5dd Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 26 Jan 2023 13:51:21 -0800 Subject: [PATCH 07/10] dmapool: consolidate page initialization Various fields of the dma pool are set in different places. Move it all to one function. Link: https://lkml.kernel.org/r/20230126215125.4069751-9-kbusch@meta.com Fixes: 2d55c16c0c54 ("dmapool: create/destroy cleanup") Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Cc: Matthew Wilcox Cc: Tony Battersby Signed-off-by: Andrew Morton --- mm/dmapool.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index 900f2afa363a..9e98065a68b1 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -274,6 +274,9 @@ static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) unsigned int offset = 0; unsigned int next_boundary = pool->boundary; + pool_init_page(pool, page); + page->in_use = 0; + page->offset = 0; do { unsigned int next = offset + pool->size; if (unlikely((next + pool->size) >= next_boundary)) { @@ -300,11 +303,7 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) return NULL; } - pool_init_page(pool, page); pool_initialise_page(pool, page); - page->in_use = 0; - page->offset = 0; - return page; } From cc669954ab38866c1aba73c27e17bb07bcb4a194 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 26 Jan 2023 13:51:22 -0800 Subject: [PATCH 08/10] dmapool: simplify freeing The actions for busy and not busy are mostly the same, so combine these and remove the unnecessary function. Also, the pool is about to be freed so there's no need to poison the page data since we only check for poison on alloc, which can't be done on a freed pool. 
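The teardown loop then collapses to a single pass (shape of the result; a busy page still deliberately leaks its coherent memory):

    list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
        if (!is_page_busy(page))
            dma_free_coherent(pool->dev, pool->allocation,
                              page->vaddr, page->dma);
        else
            dev_err(pool->dev, "%s %s, %p busy\n", __func__,
                    pool->name, page->vaddr);
        /* either way, drop the bookkeeping for the page */
        list_del(&page->page_list);
        kfree(page);
    }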
Link: https://lkml.kernel.org/r/20230126215125.4069751-10-kbusch@meta.com Fixes: 2d55c16c0c54 ("dmapool: create/destroy cleanup") Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Cc: Matthew Wilcox Cc: Tony Battersby Signed-off-by: Andrew Morton --- mm/dmapool.c | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index 9e98065a68b1..4dea2a0dbd33 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -312,16 +312,6 @@ static inline bool is_page_busy(struct dma_page *page) return page->in_use != 0; } -static void pool_free_page(struct dma_pool *pool, struct dma_page *page) -{ - dma_addr_t dma = page->dma; - - pool_init_page(pool, page); - dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma); - list_del(&page->page_list); - kfree(page); -} - /** * dma_pool_destroy - destroys a pool of dma memory blocks. * @pool: dma pool that will be destroyed @@ -349,14 +339,14 @@ void dma_pool_destroy(struct dma_pool *pool) mutex_unlock(&pools_reg_lock); list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) { - if (is_page_busy(page)) { + if (!is_page_busy(page)) + dma_free_coherent(pool->dev, pool->allocation, + page->vaddr, page->dma); + else dev_err(pool->dev, "%s %s, %p busy\n", __func__, pool->name, page->vaddr); - /* leak the still-in-use consistent memory */ - list_del(&page->page_list); - kfree(page); - } else - pool_free_page(pool, page); + list_del(&page->page_list); + kfree(page); } kfree(pool); From 8ecc369554219060367fc589661d2b7ab201e923 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 26 Jan 2023 13:51:23 -0800 Subject: [PATCH 09/10] dmapool: don't memset on free twice If debug is enabled, dmapool will poison the range, so no need to clear it to 0 immediately before writing over it. Link: https://lkml.kernel.org/r/20230126215125.4069751-11-kbusch@meta.com Fixes: 2d55c16c0c54 ("dmapool: create/destroy cleanup") Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Cc: Matthew Wilcox Cc: Tony Battersby Signed-off-by: Andrew Morton --- mm/dmapool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index 4dea2a0dbd33..21e6d362c726 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -160,6 +160,8 @@ static void pool_check_block(struct dma_pool *pool, void *retval, static bool pool_page_err(struct dma_pool *pool, struct dma_page *page, void *vaddr, dma_addr_t dma) { + if (want_init_on_free()) + memset(vaddr, 0, pool->size); return false; } @@ -441,8 +443,6 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) return; } - if (want_init_on_free()) - memset(vaddr, 0, pool->size); if (pool_page_err(pool, page, vaddr, dma)) { spin_unlock_irqrestore(&pool->lock, flags); return; From da9619a30e73b59605ed998bf7bc4359f5c0029a Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 26 Jan 2023 13:51:24 -0800 Subject: [PATCH 10/10] dmapool: link blocks across pages The allocated dmapool pages are never freed for the lifetime of the pool. There is no need for the two-level list+stack lookup for finding a free block since nothing is ever removed from the list. Just use a simple stack, reducing time complexity to constant. The implementation inserts the stack linking elements and the dma handle of the block within itself when freed. This means the smallest possible dmapool block is increased to at most 16 bytes to accommodate these fields, but there are no existing users requesting a dma pool smaller than that anyway.
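As a sketch, the freed block itself carries all the bookkeeping, which is what makes both paths constant time (matching struct dma_block and the pool_block_pop()/pool_block_push() helpers in the diff below):

    struct dma_block {
        struct dma_block *next_block;  /* pool-wide free list */
        dma_addr_t dma;                /* handle, stashed while the block is free */
    };

    /* alloc: O(1) pop from the head of the free list */
    block = pool->next_block;
    if (block)
        pool->next_block = block->next_block;

    /* free: O(1) push, remembering the dma handle inside the block */
    block->dma = dma;
    block->next_block = pool->next_block;
    pool->next_block = block;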
Removing the list results in a significant performance improvement. Using the kernel's micro-benchmarking self test:

Before:

  # modprobe dmapool_test
  dmapool test: size:16   blocks:8192   time:57282
  dmapool test: size:64   blocks:8192   time:172562
  dmapool test: size:256  blocks:8192   time:789247
  dmapool test: size:1024 blocks:2048   time:371823
  dmapool test: size:4096 blocks:1024   time:362237

After:

  # modprobe dmapool_test
  dmapool test: size:16   blocks:8192   time:24997
  dmapool test: size:64   blocks:8192   time:26584
  dmapool test: size:256  blocks:8192   time:33542
  dmapool test: size:1024 blocks:2048   time:9022
  dmapool test: size:4096 blocks:1024   time:6045

The module test allocates quite a few blocks that may not accurately represent how these pools are used in real life. For a more macro-level benchmark, running fio high-depth + high-batched on nvme, this patch shows submission and completion latency reduced by ~100usec each, a 1% IOPS improvement, and perf record's time spent in dma_pool_alloc/free was reduced by half. [kbusch@kernel.org: push new blocks in ascending order] Link: https://lkml.kernel.org/r/20230221165400.1595247-1-kbusch@meta.com Link: https://lkml.kernel.org/r/20230126215125.4069751-12-kbusch@meta.com Fixes: 2d55c16c0c54 ("dmapool: create/destroy cleanup") Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Tested-by: Bryan O'Donoghue Cc: Matthew Wilcox Cc: Tony Battersby Signed-off-by: Andrew Morton --- mm/dmapool.c | 259 ++++++++++++++++++++++++++------------------------- 1 file changed, 131 insertions(+), 128 deletions(-) diff --git a/mm/dmapool.c b/mm/dmapool.c index 21e6d362c726..d2b0f8fc9649 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -15,7 +15,7 @@ * represented by the 'struct dma_pool' which keeps a doubly-linked list of * allocated pages. Each page in the page_list is split into blocks of at * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked - list of free blocks within the page. Used blocks aren't tracked, but we + list of free blocks across all pages. Used blocks aren't tracked, but we keep a count of how many are currently allocated from each page.
*/ @@ -40,9 +40,18 @@ #define DMAPOOL_DEBUG 1 #endif +struct dma_block { + struct dma_block *next_block; + dma_addr_t dma; +}; + struct dma_pool { /* the pool */ struct list_head page_list; spinlock_t lock; + struct dma_block *next_block; + size_t nr_blocks; + size_t nr_active; + size_t nr_pages; struct device *dev; unsigned int size; unsigned int allocation; @@ -55,8 +64,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */ struct list_head page_list; void *vaddr; dma_addr_t dma; - unsigned int in_use; - unsigned int offset; }; static DEFINE_MUTEX(pools_lock); @@ -64,30 +71,18 @@ static DEFINE_MUTEX(pools_reg_lock); static ssize_t pools_show(struct device *dev, struct device_attribute *attr, char *buf) { - int size; - struct dma_page *page; struct dma_pool *pool; + unsigned size; size = sysfs_emit(buf, "poolinfo - 0.1\n"); mutex_lock(&pools_lock); list_for_each_entry(pool, &dev->dma_pools, pools) { - unsigned pages = 0; - size_t blocks = 0; - - spin_lock_irq(&pool->lock); - list_for_each_entry(page, &pool->page_list, page_list) { - pages++; - blocks += page->in_use; - } - spin_unlock_irq(&pool->lock); - /* per-pool info, no real statistics yet */ - size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2u\n", - pool->name, blocks, - (size_t) pages * - (pool->allocation / pool->size), - pool->size, pages); + size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2zu\n", + pool->name, pool->nr_active, + pool->nr_blocks, pool->size, + pool->nr_pages); } mutex_unlock(&pools_lock); @@ -97,17 +92,17 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha static DEVICE_ATTR_RO(pools); #ifdef DMAPOOL_DEBUG -static void pool_check_block(struct dma_pool *pool, void *retval, - unsigned int offset, gfp_t mem_flags) +static void pool_check_block(struct dma_pool *pool, struct dma_block *block, + gfp_t mem_flags) { + u8 *data = (void *)block; int i; - u8 *data = retval; - /* page->offset is stored in first 4 bytes */ - for (i = sizeof(offset); i < pool->size; i++) { + + for (i = sizeof(struct dma_block); i < pool->size; i++) { if (data[i] == POOL_POISON_FREED) continue; - dev_err(pool->dev, "%s %s, %p (corrupted)\n", - __func__, pool->name, retval); + dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__, + pool->name, block); /* * Dump the first 4 bytes even if they are not @@ -117,31 +112,46 @@ static void pool_check_block(struct dma_pool *pool, void *retval, data, pool->size, 1); break; } + if (!want_init_on_alloc(mem_flags)) - memset(retval, POOL_POISON_ALLOCATED, pool->size); + memset(block, POOL_POISON_ALLOCATED, pool->size); } -static bool pool_page_err(struct dma_pool *pool, struct dma_page *page, - void *vaddr, dma_addr_t dma) +static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) { - unsigned int offset = vaddr - page->vaddr; - unsigned int chain = page->offset; + struct dma_page *page; - if ((dma - page->dma) != offset) { - dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n", + list_for_each_entry(page, &pool->page_list, page_list) { + if (dma < page->dma) + continue; + if ((dma - page->dma) < pool->allocation) + return page; + } + return NULL; +} + +static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma) +{ + struct dma_block *block = pool->next_block; + struct dma_page *page; + + page = pool_find_page(pool, dma); + if (!page) { + dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n", __func__, pool->name, vaddr, &dma); return true; } - while (chain < pool->allocation) { - if (chain != offset) { - chain = 
*(int *)(page->vaddr + chain); + while (block) { + if (block != vaddr) { + block = block->next_block; continue; } dev_err(pool->dev, "%s %s, dma %pad already free\n", __func__, pool->name, &dma); return true; } + memset(vaddr, POOL_POISON_FREED, pool->size); return false; } @@ -151,14 +161,12 @@ static void pool_init_page(struct dma_pool *pool, struct dma_page *page) memset(page->vaddr, POOL_POISON_FREED, pool->allocation); } #else -static void pool_check_block(struct dma_pool *pool, void *retval, - unsigned int offset, gfp_t mem_flags) - +static void pool_check_block(struct dma_pool *pool, struct dma_block *block, + gfp_t mem_flags) { } -static bool pool_page_err(struct dma_pool *pool, struct dma_page *page, - void *vaddr, dma_addr_t dma) +static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma) { if (want_init_on_free()) memset(vaddr, 0, pool->size); @@ -170,6 +178,26 @@ static void pool_init_page(struct dma_pool *pool, struct dma_page *page) } #endif +static struct dma_block *pool_block_pop(struct dma_pool *pool) +{ + struct dma_block *block = pool->next_block; + + if (block) { + pool->next_block = block->next_block; + pool->nr_active++; + } + return block; +} + +static void pool_block_push(struct dma_pool *pool, struct dma_block *block, + dma_addr_t dma) +{ + block->dma = dma; + block->next_block = pool->next_block; + pool->next_block = block; +} + + /** * dma_pool_create - Creates a pool of consistent memory blocks, for dma. * @name: name of pool, for diagnostics @@ -210,8 +238,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, if (size == 0 || size > INT_MAX) return NULL; - else if (size < 4) - size = 4; + if (size < sizeof(struct dma_block)) + size = sizeof(struct dma_block); size = ALIGN(size, align); allocation = max_t(size_t, size, PAGE_SIZE); @@ -223,7 +251,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, boundary = min(boundary, allocation); - retval = kmalloc(sizeof(*retval), GFP_KERNEL); + retval = kzalloc(sizeof(*retval), GFP_KERNEL); if (!retval) return retval; @@ -236,7 +264,6 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, retval->size = size; retval->boundary = boundary; retval->allocation = allocation; - INIT_LIST_HEAD(&retval->pools); /* @@ -273,21 +300,36 @@ EXPORT_SYMBOL(dma_pool_create); static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) { - unsigned int offset = 0; - unsigned int next_boundary = pool->boundary; + unsigned int next_boundary = pool->boundary, offset = 0; + struct dma_block *block, *first = NULL, *last = NULL; pool_init_page(pool, page); - page->in_use = 0; - page->offset = 0; - do { - unsigned int next = offset + pool->size; - if (unlikely((next + pool->size) >= next_boundary)) { - next = next_boundary; + while (offset + pool->size <= pool->allocation) { + if (offset + pool->size > next_boundary) { + offset = next_boundary; next_boundary += pool->boundary; + continue; } - *(int *)(page->vaddr + offset) = next; - offset = next; - } while (offset < pool->allocation); + + block = page->vaddr + offset; + block->dma = page->dma + offset; + block->next_block = NULL; + + if (last) + last->next_block = block; + else + first = block; + last = block; + + offset += pool->size; + pool->nr_blocks++; + } + + last->next_block = pool->next_block; + pool->next_block = first; + + list_add(&page->page_list, &pool->page_list); + pool->nr_pages++; } static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) @@ -305,15 +347,9 
@@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) return NULL; } - pool_initialise_page(pool, page); return page; } -static inline bool is_page_busy(struct dma_page *page) -{ - return page->in_use != 0; -} - /** * dma_pool_destroy - destroys a pool of dma memory blocks. * @pool: dma pool that will be destroyed @@ -325,7 +361,7 @@ static inline bool is_page_busy(struct dma_page *page) void dma_pool_destroy(struct dma_pool *pool) { struct dma_page *page, *tmp; - bool empty = false; + bool empty = false, busy = false; if (unlikely(!pool)) return; @@ -340,13 +376,15 @@ void dma_pool_destroy(struct dma_pool *pool) device_remove_file(pool->dev, &dev_attr_pools); mutex_unlock(&pools_reg_lock); + if (pool->nr_active) { + dev_err(pool->dev, "%s %s busy\n", __func__, pool->name); + busy = true; + } + list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) { - if (!is_page_busy(page)) + if (!busy) dma_free_coherent(pool->dev, pool->allocation, page->vaddr, page->dma); - else - dev_err(pool->dev, "%s %s, %p busy\n", __func__, - pool->name, page->vaddr); list_del(&page->page_list); kfree(page); } @@ -368,58 +406,40 @@ EXPORT_SYMBOL(dma_pool_destroy); void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle) { - unsigned long flags; + struct dma_block *block; struct dma_page *page; - unsigned int offset; - void *retval; + unsigned long flags; might_alloc(mem_flags); spin_lock_irqsave(&pool->lock, flags); - list_for_each_entry(page, &pool->page_list, page_list) { - if (page->offset < pool->allocation) - goto ready; + block = pool_block_pop(pool); + if (!block) { + /* + * pool_alloc_page() might sleep, so temporarily drop + * &pool->lock + */ + spin_unlock_irqrestore(&pool->lock, flags); + + page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO)); + if (!page) + return NULL; + + spin_lock_irqsave(&pool->lock, flags); + pool_initialise_page(pool, page); + block = pool_block_pop(pool); } - - /* pool_alloc_page() might sleep, so temporarily drop &pool->lock */ - spin_unlock_irqrestore(&pool->lock, flags); - - page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO)); - if (!page) - return NULL; - - spin_lock_irqsave(&pool->lock, flags); - - list_add(&page->page_list, &pool->page_list); - ready: - page->in_use++; - offset = page->offset; - page->offset = *(int *)(page->vaddr + offset); - retval = offset + page->vaddr; - *handle = offset + page->dma; - pool_check_block(pool, retval, offset, mem_flags); spin_unlock_irqrestore(&pool->lock, flags); + *handle = block->dma; + pool_check_block(pool, block, mem_flags); if (want_init_on_alloc(mem_flags)) - memset(retval, 0, pool->size); + memset(block, 0, pool->size); - return retval; + return block; } EXPORT_SYMBOL(dma_pool_alloc); -static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) -{ - struct dma_page *page; - - list_for_each_entry(page, &pool->page_list, page_list) { - if (dma < page->dma) - continue; - if ((dma - page->dma) < pool->allocation) - return page; - } - return NULL; -} - /** * dma_pool_free - put block back into dma pool * @pool: the dma pool holding the block @@ -431,31 +451,14 @@ static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma) */ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma) { - struct dma_page *page; + struct dma_block *block = vaddr; unsigned long flags; spin_lock_irqsave(&pool->lock, flags); - page = pool_find_page(pool, dma); - if (!page) { - spin_unlock_irqrestore(&pool->lock, flags); - 
dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n", - __func__, pool->name, vaddr, &dma); - return; + if (!pool_block_err(pool, vaddr, dma)) { + pool_block_push(pool, block, dma); + pool->nr_active--; } - - if (pool_page_err(pool, page, vaddr, dma)) { - spin_unlock_irqrestore(&pool->lock, flags); - return; - } - - page->in_use--; - *(int *)vaddr = page->offset; - page->offset = vaddr - page->vaddr; - /* - * Resist a temptation to do - * if (!is_page_busy(page)) pool_free_page(pool, page); - * Better have a few empty pages hang around. - */ spin_unlock_irqrestore(&pool->lock, flags); } EXPORT_SYMBOL(dma_pool_free);
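For reference, a minimal consumer of the (unchanged) external API, as a sketch inside a driver function; the pool name, device pointer, block size, and alignment here are illustrative only:

    struct dma_pool *pool;
    dma_addr_t handle;
    void *vaddr;

    /* 64-byte blocks, 8-byte aligned, no boundary-crossing constraint */
    pool = dma_pool_create("example", dev, 64, 8, 0);
    if (!pool)
        return -ENOMEM;

    vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
    if (vaddr) {
        /* the CPU writes through vaddr; the device is handed 'handle' */
        dma_pool_free(pool, vaddr, handle);
    }
    dma_pool_destroy(pool);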