mm/memmap: prevent double scanning of memmap by kmemleak

kmemleak explicitly scans the mem_map through the valid struct page
objects.  However, memmap_alloc() was also adding this memory to the gray
object list, causing it to be scanned twice.  Remove memmap_alloc() from
the scan list and add a comment to clarify the behavior.

Link: https://lore.kernel.org/lkml/CAOm6qn=FVeTpH54wGDFMHuCOeYtvoTx30ktnv9-w3Nh8RMofEA@mail.gmail.com/
Link: https://lkml.kernel.org/r/20250106021126.1678334-1-guoweikang.kernel@gmail.com
Signed-off-by: Guo Weikang <guoweikang.kernel@gmail.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Guo Weikang 2025-01-06 10:11:25 +08:00 committed by Andrew Morton
parent ce8e0193e7
commit 74eb038f38
3 changed files with 13 additions and 4 deletions

View File

@ -378,6 +378,10 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
/*
* MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
* MEMBLOCK_ALLOC_ACCESSIBLE
*/
#define MEMBLOCK_ALLOC_NOLEAKTRACE 1
/* We are using top down, so it is safe to use 0 here */

View File

@ -1585,13 +1585,17 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
{
void *ptr;
/*
 * Kmemleak will explicitly scan mem_map by traversing all valid
 * `struct page` objects, so this memblock allocation does not need to be
 * added to the kmemleak scan list (hence MEMBLOCK_ALLOC_NOLEAKTRACE).
 */
if (exact_nid)
ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
MEMBLOCK_ALLOC_ACCESSIBLE,
MEMBLOCK_ALLOC_NOLEAKTRACE,
nid);
else
ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
MEMBLOCK_ALLOC_ACCESSIBLE,
MEMBLOCK_ALLOC_NOLEAKTRACE,
nid);
if (ptr && size > 0)

View File

@ -31,6 +31,8 @@
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include "internal.h"
/*
* Allocate a block of memory to be used to back the virtual memory map
* or to back the page tables that are used to create the mapping.
@ -42,8 +44,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
unsigned long align,
unsigned long goal)
{
return memblock_alloc_try_nid_raw(size, align, goal,
MEMBLOCK_ALLOC_ACCESSIBLE, node);
return memmap_alloc(size, align, goal, node, false);
}
void * __meminit vmemmap_alloc_block(unsigned long size, int node)