mm, slab: don't wrap internal functions with alloc_hooks()
The functions __kmalloc_noprof(), kmalloc_large_noprof(), kmalloc_trace_noprof() and their _node variants are all internal to the implementations of kmalloc_noprof() and kmalloc_node_noprof(). They are declared in the "public" slab.h and exported only so that those implementations can be static inline and can distinguish the build-time constant size variants. The only other users of some of the internal functions are the slub_kunit and fortify_kunit tests, which make very short-lived allocations.

Therefore we can stop wrapping them with the alloc_hooks() macro. Instead, add a __ prefix to all of them and a comment documenting them as internal. Also rename __kmalloc_trace() to __kmalloc_cache(), which is more descriptive: it is a variant of __kmalloc() where the exact kmalloc cache has already been determined.

The usage in fortify_kunit can be removed completely, as the internal functions should already be exercised through the kmalloc() tests in the test variant that passes a non-constant allocation size.

Reported-by: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Kees Cook <keescook@chromium.org>
Reviewed-by: Kent Overstreet <kent.overstreet@linux.dev>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
commit a0a44d9175
parent 1613e604df
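For readers outside the memory allocation profiling series: the public slab entry points are macros that wrap a _noprof function in alloc_hooks(), which plants a static per-callsite tag used to attribute allocations to their callers. Roughly, as a simplified sketch of the definitions in include/linux/alloc_tag.h and include/linux/slab.h (not the exact kernel code, which also handles the CONFIG_MEM_ALLOC_PROFILING=n case):

/*
 * Simplified sketch: each use of alloc_hooks() defines one static tag
 * for that callsite, makes it current for the duration of the
 * allocation, and restores the previous tag afterwards.
 */
#define alloc_hooks(_do_alloc)						\
({									\
	DEFINE_ALLOC_TAG(_tag);		/* static per-callsite tag */	\
	struct alloc_tag *_old = alloc_tag_save(&_tag);			\
	typeof(_do_alloc) _res = _do_alloc;	/* the real allocation */ \
	alloc_tag_restore(&_tag, _old);					\
	_res;								\
})

/* Public API: the hook fires once, in the outermost caller. */
#define kmalloc(...)	alloc_hooks(kmalloc_noprof(__VA_ARGS__))

Since the hook has already fired by the time kmalloc_noprof() dispatches to __kmalloc_cache_noprof() or __kmalloc_large_noprof(), wrapping those internal helpers in alloc_hooks() again buys nothing, which is why this commit un-wraps them and marks them internal with a __ prefix.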
include/linux/slab.h
@@ -528,9 +528,6 @@ static_assert(PAGE_SHIFT <= 20);
 
 #include <linux/alloc_tag.h>
 
-void *__kmalloc_noprof(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
-#define __kmalloc(...)				alloc_hooks(__kmalloc_noprof(__VA_ARGS__))
-
 /**
  * kmem_cache_alloc - Allocate an object
  * @cachep: The cache to allocate from.
@@ -568,31 +565,34 @@ static __always_inline void kfree_bulk(size_t size, void **p)
 	kmem_cache_free_bulk(NULL, size, p);
 }
 
-void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
-							 __alloc_size(1);
-#define __kmalloc_node(...)			alloc_hooks(__kmalloc_node_noprof(__VA_ARGS__))
-
 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
 				   int node) __assume_slab_alignment __malloc;
 #define kmem_cache_alloc_node(...)	alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
 
-void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
-		    __assume_kmalloc_alignment __alloc_size(3);
+/*
+ * The following functions are not to be used directly and are intended only
+ * for internal use from kmalloc() and kmalloc_node()
+ * with the exception of kunit tests
+ */
 
-void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags,
-		int node, size_t size) __assume_kmalloc_alignment
-						__alloc_size(4);
-#define kmalloc_trace(...)		alloc_hooks(kmalloc_trace_noprof(__VA_ARGS__))
+void *__kmalloc_noprof(size_t size, gfp_t flags)
+				__assume_kmalloc_alignment __alloc_size(1);
 
-#define kmalloc_node_trace(...)		alloc_hooks(kmalloc_node_trace_noprof(__VA_ARGS__))
+void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node)
+				__assume_kmalloc_alignment __alloc_size(1);
 
-void *kmalloc_large_noprof(size_t size, gfp_t flags) __assume_page_alignment
-							      __alloc_size(1);
-#define kmalloc_large(...)		alloc_hooks(kmalloc_large_noprof(__VA_ARGS__))
+void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
+				__assume_kmalloc_alignment __alloc_size(3);
 
-void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_page_alignment
-							     __alloc_size(1);
-#define kmalloc_large_node(...)		alloc_hooks(kmalloc_large_node_noprof(__VA_ARGS__))
+void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
+				  int node, size_t size)
+				__assume_kmalloc_alignment __alloc_size(4);
+
+void *__kmalloc_large_noprof(size_t size, gfp_t flags)
+				__assume_page_alignment __alloc_size(1);
+
+void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
+				__assume_page_alignment __alloc_size(1);
 
 /**
  * kmalloc - allocate kernel memory
@@ -654,10 +654,10 @@ static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
 		unsigned int index;
 
 		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large_noprof(size, flags);
+			return __kmalloc_large_noprof(size, flags);
 
 		index = kmalloc_index(size);
-		return kmalloc_trace_noprof(
+		return __kmalloc_cache_noprof(
 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
 				flags, size);
 	}
@@ -671,10 +671,10 @@ static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
 		unsigned int index;
 
 		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large_node_noprof(size, flags, node);
+			return __kmalloc_large_node_noprof(size, flags, node);
 
 		index = kmalloc_index(size);
-		return kmalloc_node_trace_noprof(
+		return __kmalloc_cache_node_noprof(
 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
 				flags, node, size);
 	}
lib/fortify_kunit.c
@@ -233,11 +233,6 @@ static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
 		kfree(p));					\
 	checker(expected_size,					\
 		kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE),	\
 		kfree(p));					\
-	checker(expected_size, __kmalloc(alloc_size, gfp),	\
-		kfree(p));					\
-	checker(expected_size,					\
-		__kmalloc_node(alloc_size, gfp, NUMA_NO_NODE),	\
-		kfree(p));					\
 								\
 	orig = kmalloc(alloc_size, gfp);			\
lib/slub_kunit.c
@@ -140,7 +140,7 @@ static void test_kmalloc_redzone_access(struct kunit *test)
 {
 	struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
 				SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
-	u8 *p = kmalloc_trace(s, GFP_KERNEL, 18);
+	u8 *p = __kmalloc_cache_noprof(s, GFP_KERNEL, 18);
 
 	kasan_disable_current();
mm/slub.c
@@ -4053,7 +4053,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
  * directly to the page allocator. We use __GFP_COMP, because we will need to
  * know the allocation order to free the pages properly in kfree.
  */
-static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
+static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	struct folio *folio;
 	void *ptr = NULL;
@@ -4078,25 +4078,25 @@ static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
 	return ptr;
 }
 
-void *kmalloc_large_noprof(size_t size, gfp_t flags)
+void *__kmalloc_large_noprof(size_t size, gfp_t flags)
 {
-	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
+	void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE);
 
 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
 		      flags, NUMA_NO_NODE);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_large_noprof);
+EXPORT_SYMBOL(__kmalloc_large_noprof);
 
-void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
+void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
 {
-	void *ret = __kmalloc_large_node(size, flags, node);
+	void *ret = ___kmalloc_large_node(size, flags, node);
 
 	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
 		      flags, node);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_large_node_noprof);
+EXPORT_SYMBOL(__kmalloc_large_node_noprof);
 
 static __always_inline
 void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
@@ -4106,7 +4106,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
 	void *ret;
 
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
-		ret = __kmalloc_large_node(size, flags, node);
+		ret = __kmalloc_large_node_noprof(size, flags, node);
 		trace_kmalloc(caller, ret, size,
 			      PAGE_SIZE << get_order(size), flags, node);
 		return ret;
@@ -4142,7 +4142,7 @@ void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags,
 }
 EXPORT_SYMBOL(kmalloc_node_track_caller_noprof);
 
-void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
 				    _RET_IP_, size);
@@ -4152,10 +4152,10 @@ void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_trace_noprof);
+EXPORT_SYMBOL(__kmalloc_cache_noprof);
 
-void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags,
-		int node, size_t size)
+void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
+				  int node, size_t size)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
 
@@ -4164,7 +4164,7 @@ void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags,
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_node_trace_noprof);
+EXPORT_SYMBOL(__kmalloc_cache_node_noprof);
 
 static noinline void free_to_partial_list(
 	struct kmem_cache *s, struct slab *slab,