mm/slab: move pre/post-alloc hooks from slab.h to slub.c
We don't share the hooks between two slab implementations anymore so they can be moved away from the header. As part of the move, also move should_failslab() from slab_common.c as the pre_alloc hook uses it. This means slab.h can stop including fault-inject.h and kmemleak.h. Fix up some files that were depending on the includes transitively.

Reviewed-by: Kees Cook <keescook@chromium.org>
Acked-by: David Rientjes <rientjes@google.com>
Tested-by: David Rientjes <rientjes@google.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 89c2d061bf
commit 6011be5991
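For context, below is a minimal sketch of how an allocation path in mm/slub.c is expected to wrap an object allocation with the two moved hooks now that they live next to their only callers. The function sketch_slab_alloc() and the helper __sketch_alloc_object() are illustrative placeholders, not the kernel's actual fastpath, which additionally handles KFENCE allocations and free-pointer wiping.

/* Illustrative placeholder for the real per-CPU fastpath / slowpath lookup. */
static void *__sketch_alloc_object(struct kmem_cache *s, gfp_t gfpflags,
                                   int node, unsigned long addr,
                                   size_t orig_size);

static void *sketch_slab_alloc(struct kmem_cache *s, struct list_lru *lru,
                               gfp_t gfpflags, int node, unsigned long addr,
                               size_t orig_size)
{
        struct obj_cgroup *objcg = NULL;
        void *object;

        /* May fail the allocation via fault injection or memcg charging. */
        s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
        if (!s)
                return NULL;

        object = __sketch_alloc_object(s, gfpflags, node, addr, orig_size);

        /* Zeroing, KASAN/KMSAN, kmemleak and memcg accounting in one place. */
        slab_post_alloc_hook(s, objcg, gfpflags, 1, &object,
                             slab_want_init_on_alloc(gfpflags, s), orig_size);

        return object;
}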
@@ -23,6 +23,7 @@
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/sched/task_stack.h>

@@ -64,6 +64,7 @@
#include <linux/psi.h>
#include <linux/seq_buf.h>
#include <linux/sched/isolation.h>
#include <linux/kmemleak.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>

mm/slab.h (72 lines changed)
@@ -9,8 +9,6 @@
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemleak.h>
#include <linux/kfence.h>
#include <linux/kasan.h>

@@ -796,76 +794,6 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
        return s->size;
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     struct list_lru *lru,
                                                     struct obj_cgroup **objcgp,
                                                     size_t size, gfp_t flags)
{
        flags &= gfp_allowed_mask;

        might_alloc(flags);

        if (should_failslab(s, flags))
                return NULL;

        if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
                return NULL;

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
                                        struct obj_cgroup *objcg, gfp_t flags,
                                        size_t size, void **p, bool init,
                                        unsigned int orig_size)
{
        unsigned int zero_size = s->object_size;
        bool kasan_init = init;
        size_t i;

        flags &= gfp_allowed_mask;

        /*
         * For kmalloc object, the allocated memory size(object_size) is likely
         * larger than the requested size(orig_size). If redzone check is
         * enabled for the extra space, don't zero it, as it will be redzoned
         * soon. The redzone operation for this extra space could be seen as a
         * replacement of current poisoning under certain debug option, and
         * won't break other sanity checks.
         */
        if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
            (s->flags & SLAB_KMALLOC))
                zero_size = orig_size;

        /*
         * When slub_debug is enabled, avoid memory initialization integrated
         * into KASAN and instead zero out the memory via the memset below with
         * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
         * cause false-positive reports. This does not lead to a performance
         * penalty on production builds, as slub_debug is not intended to be
         * enabled there.
         */
        if (__slub_debug_enabled())
                kasan_init = false;

        /*
         * As memory initialization might be integrated into KASAN,
         * kasan_slab_alloc and initialization memset must be
         * kept together to avoid discrepancies in behavior.
         *
         * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
         */
        for (i = 0; i < size; i++) {
                p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
                if (p[i] && init && (!kasan_init || !kasan_has_integrated_init()))
                        memset(p[i], 0, zero_size);
                kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
                kmsan_slab_alloc(s, p[i], flags);
        }

        memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

/*
 * The slab lists for all objects.

mm/slab_common.c

@@ -21,6 +21,7 @@
#include <linux/swiotlb.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

@@ -1470,10 +1471,3 @@ EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);

int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
        if (__should_failslab(s, gfpflags))
                return -ENOMEM;
        return 0;
}
ALLOW_ERROR_INJECTION(should_failslab, ERRNO);

mm/slub.c (81 lines changed)
@@ -34,6 +34,7 @@
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/kmemleak.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>

@@ -3494,6 +3495,86 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
                        0, sizeof(void *));
}

noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
        if (__should_failslab(s, gfpflags))
                return -ENOMEM;
        return 0;
}
ALLOW_ERROR_INJECTION(should_failslab, ERRNO);

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     struct list_lru *lru,
                                                     struct obj_cgroup **objcgp,
                                                     size_t size, gfp_t flags)
{
        flags &= gfp_allowed_mask;

        might_alloc(flags);

        if (should_failslab(s, flags))
                return NULL;

        if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
                return NULL;

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
                                        struct obj_cgroup *objcg, gfp_t flags,
                                        size_t size, void **p, bool init,
                                        unsigned int orig_size)
{
        unsigned int zero_size = s->object_size;
        bool kasan_init = init;
        size_t i;

        flags &= gfp_allowed_mask;

        /*
         * For kmalloc object, the allocated memory size(object_size) is likely
         * larger than the requested size(orig_size). If redzone check is
         * enabled for the extra space, don't zero it, as it will be redzoned
         * soon. The redzone operation for this extra space could be seen as a
         * replacement of current poisoning under certain debug option, and
         * won't break other sanity checks.
         */
        if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
            (s->flags & SLAB_KMALLOC))
                zero_size = orig_size;

        /*
         * When slub_debug is enabled, avoid memory initialization integrated
         * into KASAN and instead zero out the memory via the memset below with
         * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
         * cause false-positive reports. This does not lead to a performance
         * penalty on production builds, as slub_debug is not intended to be
         * enabled there.
         */
        if (__slub_debug_enabled())
                kasan_init = false;

        /*
         * As memory initialization might be integrated into KASAN,
         * kasan_slab_alloc and initialization memset must be
         * kept together to avoid discrepancies in behavior.
         *
         * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
         */
        for (i = 0; i < size; i++) {
                p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
                if (p[i] && init && (!kasan_init ||
                                     !kasan_has_integrated_init()))
                        memset(p[i], 0, zero_size);
                kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
                kmsan_slab_alloc(s, p[i], flags);
        }

        memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call