mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
mm, slub: split out allocations from pre/post hooks
In the following patch we want to introduce CONFIG_SLUB_TINY allocation
paths that don't use the percpu slab. To prepare, refactor the allocation
functions: split out __slab_alloc_node() from slab_alloc_node(), where the
former does the actual allocation and the latter calls the pre/post hooks.
Analogously, split out __kmem_cache_alloc_bulk() from kmem_cache_alloc_bulk().

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Christoph Lameter <cl@linux.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
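The shape of the refactoring, independent of SLUB internals: the exported entry point keeps the pre/post hooks and delegates the actual allocation to a new double-underscore helper. Below is a minimal, standalone userspace sketch of that wrapper/worker split; the names (struct demo_cache, pre_alloc_hook(), __cache_alloc_node(), ...) and their bodies are illustrative stand-ins, not the kernel functions touched by this patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-ins for the real SLUB machinery; only the call structure matters. */
struct demo_cache {
	size_t object_size;
	const char *name;
};

/* Pre-hook: may fail (e.g. accounting); returns the cache to use or NULL. */
static struct demo_cache *pre_alloc_hook(struct demo_cache *s)
{
	return s;
}

/* Post-hook: debug/initialization work done on the allocated object. */
static void post_alloc_hook(struct demo_cache *s, void *object, int init)
{
	if (object && init)
		memset(object, 0, s->object_size);
}

/* The split-out worker: does only the actual allocation, no hooks. */
static void *__cache_alloc_node(struct demo_cache *s)
{
	return malloc(s->object_size);
}

/* The outer function keeps the hooks and delegates the allocation. */
static void *cache_alloc_node(struct demo_cache *s, int init)
{
	void *object;

	s = pre_alloc_hook(s);
	if (!s)
		return NULL;

	object = __cache_alloc_node(s);

	post_alloc_hook(s, object, init);
	return object;
}

int main(void)
{
	struct demo_cache cache = { .object_size = 64, .name = "demo" };
	void *obj = cache_alloc_node(&cache, 1);

	printf("%s: allocated %p\n", cache.name, obj);
	free(obj);
	return 0;
}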
parent fa9b88e459
commit 56d5a2b9ba

mm/slub.c: 130 changed lines
@@ -2907,10 +2907,10 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 }
 #endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
 
+#ifdef CONFIG_SLUB_DEBUG
 static noinline void
 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 {
-#ifdef CONFIG_SLUB_DEBUG
 	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
 	int node;
@@ -2941,8 +2941,11 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
 			node, nr_slabs, nr_objs, nr_free);
 	}
-#endif
 }
+#else /* CONFIG_SLUB_DEBUG */
+static inline void
+slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
+#endif
 
 static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
 {
@@ -3239,45 +3242,13 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	return p;
 }
 
-/*
- * If the object has been wiped upon free, make sure it's fully initialized by
- * zeroing out freelist pointer.
- */
-static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
-						   void *obj)
-{
-	if (unlikely(slab_want_init_on_free(s)) && obj)
-		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
-			0, sizeof(void *));
-}
-
-/*
- * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
- * have the fastpath folded into their functions. So no function call
- * overhead for requests that can be satisfied on the fastpath.
- *
- * The fastpath works by first checking if the lockless freelist can be used.
- * If not then __slab_alloc is called for slow processing.
- *
- * Otherwise we can simply pick the next object from the lockless free list.
- */
-static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
+static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
 {
-	void *object;
 	struct kmem_cache_cpu *c;
 	struct slab *slab;
 	unsigned long tid;
-	struct obj_cgroup *objcg = NULL;
-	bool init = false;
-
-	s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
-	if (!s)
-		return NULL;
-
-	object = kfence_alloc(s, orig_size, gfpflags);
-	if (unlikely(object))
-		goto out;
+	void *object;
 
 redo:
 	/*
@@ -3347,6 +3318,48 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_l
 		stat(s, ALLOC_FASTPATH);
 	}
 
+	return object;
+}
+
+/*
+ * If the object has been wiped upon free, make sure it's fully initialized by
+ * zeroing out freelist pointer.
+ */
+static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
+						   void *obj)
+{
+	if (unlikely(slab_want_init_on_free(s)) && obj)
+		memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
+			0, sizeof(void *));
+}
+
+/*
+ * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
+ * have the fastpath folded into their functions. So no function call
+ * overhead for requests that can be satisfied on the fastpath.
+ *
+ * The fastpath works by first checking if the lockless freelist can be used.
+ * If not then __slab_alloc is called for slow processing.
+ *
+ * Otherwise we can simply pick the next object from the lockless free list.
+ */
+static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
+		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
+{
+	void *object;
+	struct obj_cgroup *objcg = NULL;
+	bool init = false;
+
+	s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
+	if (!s)
+		return NULL;
+
+	object = kfence_alloc(s, orig_size, gfpflags);
+	if (unlikely(object))
+		goto out;
+
+	object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
+
 	maybe_wipe_obj_freeptr(s, object);
 	init = slab_want_init_on_alloc(gfpflags, s);
 
@@ -3799,18 +3812,12 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
-/* Note that interrupts must be enabled when calling this function. */
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
-			  void **p)
+static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
+			size_t size, void **p, struct obj_cgroup *objcg)
 {
 	struct kmem_cache_cpu *c;
 	int i;
-	struct obj_cgroup *objcg = NULL;
 
-	/* memcg and kmem_cache debug support */
-	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
-	if (unlikely(!s))
-		return false;
 	/*
 	 * Drain objects in the per cpu slab, while disabling local
 	 * IRQs, which protects against PREEMPT and interrupts
@@ -3864,18 +3871,41 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	local_unlock_irq(&s->cpu_slab->lock);
 	slub_put_cpu_ptr(s->cpu_slab);
 
-	/*
-	 * memcg and kmem_cache debug support and memory initialization.
-	 * Done outside of the IRQ disabled fastpath loop.
-	 */
-	slab_post_alloc_hook(s, objcg, flags, size, p,
-				slab_want_init_on_alloc(flags, s));
 	return i;
 
 error:
 	slub_put_cpu_ptr(s->cpu_slab);
 	slab_post_alloc_hook(s, objcg, flags, i, p, false);
 	kmem_cache_free_bulk(s, i, p);
 	return 0;
 
 }
+
+/* Note that interrupts must be enabled when calling this function. */
+int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+			  void **p)
+{
+	int i;
+	struct obj_cgroup *objcg = NULL;
+
+	if (!size)
+		return 0;
+
+	/* memcg and kmem_cache debug support */
+	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
+	if (unlikely(!s))
+		return 0;
+
+	i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg);
+
+	/*
+	 * memcg and kmem_cache debug support and memory initialization.
+	 * Done outside of the IRQ disabled fastpath loop.
+	 */
+	if (i != 0)
+		slab_post_alloc_hook(s, objcg, flags, size, p,
+				slab_want_init_on_alloc(flags, s));
+	return i;
+}
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
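For context on what callers see, the external contract of the bulk API is unchanged by this split: kmem_cache_alloc_bulk() still returns either the full requested count or 0 on failure, with the pre/post hooks now applied in the outer wrapper around __kmem_cache_alloc_bulk(). Below is a hedged caller-side sketch assuming kernel context; the demo_obj type, its cache, and demo_bulk_alloc() are hypothetical illustration code, only the kmem_cache_* calls are the real API.

#include <linux/slab.h>
#include <linux/errno.h>

#define NR_OBJS 16

/* Hypothetical object type and cache, for illustration only. */
struct demo_obj {
	int id;
};

static int demo_bulk_alloc(void)
{
	struct kmem_cache *demo_cache;
	void *objs[NR_OBJS];
	int allocated, i;

	demo_cache = kmem_cache_create("demo_obj", sizeof(struct demo_obj),
				       0, 0, NULL);
	if (!demo_cache)
		return -ENOMEM;

	/*
	 * kmem_cache_alloc_bulk() returns the number of objects placed in
	 * objs[]: either NR_OBJS or 0, as seen in the return paths of the
	 * diff above, with the hooks run in the wrapper.
	 */
	allocated = kmem_cache_alloc_bulk(demo_cache, GFP_KERNEL, NR_OBJS, objs);
	if (!allocated) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}

	for (i = 0; i < allocated; i++)
		((struct demo_obj *)objs[i])->id = i;

	/* Bulk free pairs with bulk alloc. */
	kmem_cache_free_bulk(demo_cache, allocated, objs);
	kmem_cache_destroy(demo_cache);
	return 0;
}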