mm, slab: move slab_memcg hooks to mm/memcontrol.c
The hooks make multiple calls to functions in mm/memcontrol.c, including
to current_obj_cgroup(), which is marked __always_inline. It might be
faster to make a single call to the hook in mm/memcontrol.c instead. The
hooks also use almost nothing from mm/slub.c. obj_full_size() can move
with the hooks, and cache_vmstat_idx() can move to the internal mm/slab.h.

Link: https://lkml.kernel.org/r/20240326-slab-memcg-v3-2-d85d2563287a@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jeff Layton <jlayton@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Josh Poimboeuf <jpoimboe@kernel.org>
Cc: Kees Cook <kees@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit e6100a4590
parent 9f9796b413
mm/memcontrol.c | 90

@@ -3556,6 +3556,96 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
 	refill_obj_stock(objcg, size, true);
 }
 
+static inline size_t obj_full_size(struct kmem_cache *s)
+{
+	/*
+	 * For each accounted object there is an extra space which is used
+	 * to store obj_cgroup membership. Charge it too.
+	 */
+	return s->size + sizeof(struct obj_cgroup *);
+}
+
+bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
+				  gfp_t flags, size_t size, void **p)
+{
+	struct obj_cgroup *objcg;
+	struct slab *slab;
+	unsigned long off;
+	size_t i;
+
+	/*
+	 * The obtained objcg pointer is safe to use within the current scope,
+	 * defined by current task or set_active_memcg() pair.
+	 * obj_cgroup_get() is used to get a permanent reference.
+	 */
+	objcg = current_obj_cgroup();
+	if (!objcg)
+		return true;
+
+	/*
+	 * slab_alloc_node() avoids the NULL check, so we might be called with a
+	 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
+	 * the whole requested size.
+	 * return success as there's nothing to free back
+	 */
+	if (unlikely(*p == NULL))
+		return true;
+
+	flags &= gfp_allowed_mask;
+
+	if (lru) {
+		int ret;
+		struct mem_cgroup *memcg;
+
+		memcg = get_mem_cgroup_from_objcg(objcg);
+		ret = memcg_list_lru_alloc(memcg, lru, flags);
+		css_put(&memcg->css);
+
+		if (ret)
+			return false;
+	}
+
+	if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
+		return false;
+
+	for (i = 0; i < size; i++) {
+		slab = virt_to_slab(p[i]);
+
+		if (!slab_obj_exts(slab) &&
+		    alloc_slab_obj_exts(slab, s, flags, false)) {
+			obj_cgroup_uncharge(objcg, obj_full_size(s));
+			continue;
+		}
+
+		off = obj_to_index(s, slab, p[i]);
+		obj_cgroup_get(objcg);
+		slab_obj_exts(slab)[off].objcg = objcg;
+		mod_objcg_state(objcg, slab_pgdat(slab),
+				cache_vmstat_idx(s), obj_full_size(s));
+	}
+
+	return true;
+}
+
+void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+			    void **p, int objects, struct slabobj_ext *obj_exts)
+{
+	for (int i = 0; i < objects; i++) {
+		struct obj_cgroup *objcg;
+		unsigned int off;
+
+		off = obj_to_index(s, slab, p[i]);
+		objcg = obj_exts[off].objcg;
+		if (!objcg)
+			continue;
+
+		obj_exts[off].objcg = NULL;
+		obj_cgroup_uncharge(objcg, obj_full_size(s));
+		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
+				-obj_full_size(s));
+		obj_cgroup_put(objcg);
+	}
+}
 #endif /* CONFIG_MEMCG_KMEM */
 
 /*
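For context (not part of this commit's diff): on the mm/slub.c side the
fastpath wrapper stays inlined and now crosses into mm/memcontrol.c through
a single call to the hook above, rather than inlining current_obj_cgroup()
and making several memcontrol.c calls per call site. A simplified sketch of
that wrapper, assuming the checks used around this series:

	/*
	 * Sketch only: the inlined mm/slub.c wrapper. On failure the
	 * allocation is undone, via memcg_alloc_abort_single() for a
	 * single object or kmem_cache_free_bulk() for a bulk request.
	 */
	static __fastpath_inline
	bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
					gfp_t flags, size_t size, void **p)
	{
		if (likely(!memcg_kmem_online()))
			return true;

		if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)))
			return true;

		/* the single out-of-line call into mm/memcontrol.c */
		if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p)))
			return true;

		if (likely(size == 1)) {
			memcg_alloc_abort_single(s, *p);
			*p = NULL;
		} else {
			kmem_cache_free_bulk(s, size, p);
		}
		return false;
	}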
mm/slab.h | 13
@@ -558,6 +558,9 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
 	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
 }
 
+int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+			gfp_t gfp, bool new_slab);
+
 #else /* CONFIG_SLAB_OBJ_EXT */
 
 static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
@@ -567,7 +570,17 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
 
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
+static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
+{
+	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
+		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
+}
+
+#ifdef CONFIG_MEMCG_KMEM
+bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
+				  gfp_t flags, size_t size, void **p);
+void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
+			    void **p, int objects, struct slabobj_ext *obj_exts);
+void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
+		     enum node_stat_item idx, int nr);
+#endif
+
mm/slub.c | 103
@@ -1859,12 +1859,6 @@ static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
 #endif
 #endif /* CONFIG_SLUB_DEBUG */
 
-static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
-{
-	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
-}
-
 #ifdef CONFIG_SLAB_OBJ_EXT
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
@@ -1923,8 +1917,8 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
 #define OBJCGS_CLEAR_MASK	(__GFP_DMA | __GFP_RECLAIMABLE | \
 				__GFP_ACCOUNT | __GFP_NOFAIL)
 
-static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
-			       gfp_t gfp, bool new_slab)
+int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+			gfp_t gfp, bool new_slab)
 {
 	unsigned int objects = objs_per_slab(s, slab);
 	unsigned long new_exts;
@@ -2083,78 +2077,6 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 #endif /* CONFIG_SLAB_OBJ_EXT */
 
 #ifdef CONFIG_MEMCG_KMEM
-static inline size_t obj_full_size(struct kmem_cache *s)
-{
-	/*
-	 * For each accounted object there is an extra space which is used
-	 * to store obj_cgroup membership. Charge it too.
-	 */
-	return s->size + sizeof(struct obj_cgroup *);
-}
-
-static bool __memcg_slab_post_alloc_hook(struct kmem_cache *s,
-					 struct list_lru *lru,
-					 gfp_t flags, size_t size,
-					 void **p)
-{
-	struct obj_cgroup *objcg;
-	struct slab *slab;
-	unsigned long off;
-	size_t i;
-
-	/*
-	 * The obtained objcg pointer is safe to use within the current scope,
-	 * defined by current task or set_active_memcg() pair.
-	 * obj_cgroup_get() is used to get a permanent reference.
-	 */
-	objcg = current_obj_cgroup();
-	if (!objcg)
-		return true;
-
-	/*
-	 * slab_alloc_node() avoids the NULL check, so we might be called with a
-	 * single NULL object. kmem_cache_alloc_bulk() aborts if it can't fill
-	 * the whole requested size.
-	 * return success as there's nothing to free back
-	 */
-	if (unlikely(*p == NULL))
-		return true;
-
-	flags &= gfp_allowed_mask;
-
-	if (lru) {
-		int ret;
-		struct mem_cgroup *memcg;
-
-		memcg = get_mem_cgroup_from_objcg(objcg);
-		ret = memcg_list_lru_alloc(memcg, lru, flags);
-		css_put(&memcg->css);
-
-		if (ret)
-			return false;
-	}
-
-	if (obj_cgroup_charge(objcg, flags, size * obj_full_size(s)))
-		return false;
-
-	for (i = 0; i < size; i++) {
-		slab = virt_to_slab(p[i]);
-
-		if (!slab_obj_exts(slab) &&
-		    alloc_slab_obj_exts(slab, s, flags, false)) {
-			obj_cgroup_uncharge(objcg, obj_full_size(s));
-			continue;
-		}
-
-		off = obj_to_index(s, slab, p[i]);
-		obj_cgroup_get(objcg);
-		slab_obj_exts(slab)[off].objcg = objcg;
-		mod_objcg_state(objcg, slab_pgdat(slab),
-				cache_vmstat_idx(s), obj_full_size(s));
-	}
-
-	return true;
-}
-
 static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
 
@@ -2181,27 +2103,6 @@ bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 	return false;
 }
 
-static void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
-				   void **p, int objects,
-				   struct slabobj_ext *obj_exts)
-{
-	for (int i = 0; i < objects; i++) {
-		struct obj_cgroup *objcg;
-		unsigned int off;
-
-		off = obj_to_index(s, slab, p[i]);
-		objcg = obj_exts[off].objcg;
-		if (!objcg)
-			continue;
-
-		obj_exts[off].objcg = NULL;
-		obj_cgroup_uncharge(objcg, obj_full_size(s));
-		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
-				-obj_full_size(s));
-		obj_cgroup_put(objcg);
-	}
-}
-
 static __fastpath_inline
 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 			  int objects)
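The matching free-path wrapper in mm/slub.c, whose signature appears as
context in the last hunk above, likewise makes at most one call into
__memcg_slab_free_hook(). A simplified sketch, again assuming the checks
used around this series rather than the exact code at this commit:

	static __fastpath_inline
	void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
				  int objects)
	{
		struct slabobj_ext *obj_exts;

		/* nothing to uncharge if kmem accounting is not enabled */
		if (!memcg_kmem_online())
			return;

		/* slabs with no object extensions carry no objcg references */
		obj_exts = slab_obj_exts(slab);
		if (likely(!obj_exts))
			return;

		/* the single out-of-line call into mm/memcontrol.c */
		__memcg_slab_free_hook(s, slab, p, objects, obj_exts);
	}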