zsmalloc: rename class stat mutators
A cosmetic change.

o Rename class_stat_inc() and class_stat_dec() to class_stat_add() and
  class_stat_sub(), respectively. inc/dec are usually associated with
  +1/-1 modifications, while zsmalloc can modify stats by up to
  ->objs_per_zspage, so add/sub (following atomics naming) is more
  accurate.

o Rename zs_stat_get() to class_stat_read(). get() is usually associated
  with ref-counting and is paired with put(); zs_stat_get() simply reads
  a class stat, so rename it to reflect that. (This also follows atomics
  naming.)

Link: https://lkml.kernel.org/r/20240701031140.3756345-1-senozhatsky@chromium.org
Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9b89e01899
commit 791abe1e42
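The new names follow the kernel atomics convention the commit message cites:
inc/dec are reserved for strictly +1/-1 changes, add/sub take an explicit
delta, and read is a plain load with no put() counterpart. A minimal
illustration (a hypothetical demo function, not part of this patch; only the
atomic_long_* calls themselves are real kernel API):

	#include <linux/atomic.h>

	static atomic_long_t demo_counter;	/* hypothetical counter */

	static void naming_demo(long delta)
	{
		atomic_long_inc(&demo_counter);		/* inc: strictly +1 */
		atomic_long_dec(&demo_counter);		/* dec: strictly -1 */
		atomic_long_add(delta, &demo_counter);	/* add: arbitrary delta */
		atomic_long_sub(delta, &demo_counter);	/* sub: arbitrary delta */
		(void)atomic_long_read(&demo_counter);	/* read: plain load, no put() pairing */
	}

zsmalloc's class stats move by up to class->objs_per_zspage in a single call
(see the __free_zspage and zs_malloc hunks below), which is why add/sub is the
accurate vocabulary.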
mm/zsmalloc.c

@@ -512,19 +512,19 @@ static int get_size_class_index(int size)
 	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
 }
 
-static inline void class_stat_inc(struct size_class *class,
-				int type, unsigned long cnt)
+static inline void class_stat_add(struct size_class *class, int type,
+				  unsigned long cnt)
 {
 	class->stats.objs[type] += cnt;
 }
 
-static inline void class_stat_dec(struct size_class *class,
-				int type, unsigned long cnt)
+static inline void class_stat_sub(struct size_class *class, int type,
+				  unsigned long cnt)
 {
 	class->stats.objs[type] -= cnt;
 }
 
-static inline unsigned long zs_stat_get(struct size_class *class, int type)
+static inline unsigned long class_stat_read(struct size_class *class, int type)
 {
 	return class->stats.objs[type];
 }
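Note that the renamed helpers remain plain, non-atomic loads and stores; the
callers in the hunks below all serialize on class->lock. A minimal usage
sketch (hypothetical caller, mirroring the zs_free() pattern further down):

	unsigned long nr_inuse;

	spin_lock(&class->lock);
	class_stat_sub(class, ZS_OBJS_INUSE, 1);	/* one object freed */
	nr_inuse = class_stat_read(class, ZS_OBJS_INUSE); /* read under the same lock */
	spin_unlock(&class->lock);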
@@ -576,12 +576,12 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
 
 		seq_printf(s, " %5u %5u ", i, class->size);
 		for (fg = ZS_INUSE_RATIO_10; fg < NR_FULLNESS_GROUPS; fg++) {
-			inuse_totals[fg] += zs_stat_get(class, fg);
-			seq_printf(s, "%9lu ", zs_stat_get(class, fg));
+			inuse_totals[fg] += class_stat_read(class, fg);
+			seq_printf(s, "%9lu ", class_stat_read(class, fg));
 		}
 
-		obj_allocated = zs_stat_get(class, ZS_OBJS_ALLOCATED);
-		obj_used = zs_stat_get(class, ZS_OBJS_INUSE);
+		obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
+		obj_used = class_stat_read(class, ZS_OBJS_INUSE);
 		freeable = zs_can_compact(class);
 		spin_unlock(&class->lock);
 
@@ -686,7 +686,7 @@ static void insert_zspage(struct size_class *class,
 				struct zspage *zspage,
 				int fullness)
 {
-	class_stat_inc(class, fullness, 1);
+	class_stat_add(class, fullness, 1);
 	list_add(&zspage->list, &class->fullness_list[fullness]);
 	zspage->fullness = fullness;
 }
@@ -702,7 +702,7 @@ static void remove_zspage(struct size_class *class, struct zspage *zspage)
 	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
 
 	list_del_init(&zspage->list);
-	class_stat_dec(class, fullness, 1);
+	class_stat_sub(class, fullness, 1);
 }
 
 /*
@@ -858,7 +858,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
 
 	cache_free_zspage(pool, zspage);
 
-	class_stat_dec(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
+	class_stat_sub(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
 	atomic_long_sub(class->pages_per_zspage, &pool->pages_allocated);
 }
 
@@ -1374,7 +1374,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 		obj_malloc(pool, zspage, handle);
 		/* Now move the zspage to another fullness group, if required */
 		fix_fullness_group(class, zspage);
-		class_stat_inc(class, ZS_OBJS_INUSE, 1);
+		class_stat_add(class, ZS_OBJS_INUSE, 1);
 
 		goto out;
 	}
@@ -1392,8 +1392,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 	newfg = get_fullness_group(class, zspage);
 	insert_zspage(class, zspage, newfg);
 	atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
-	class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
-	class_stat_inc(class, ZS_OBJS_INUSE, 1);
+	class_stat_add(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
+	class_stat_add(class, ZS_OBJS_INUSE, 1);
 
 	/* We completely set up zspage so mark them as movable */
 	SetZsPageMovable(pool, zspage);
@@ -1454,7 +1454,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
 	spin_lock(&class->lock);
 	read_unlock(&pool->migrate_lock);
 
-	class_stat_dec(class, ZS_OBJS_INUSE, 1);
+	class_stat_sub(class, ZS_OBJS_INUSE, 1);
 	obj_free(class->size, obj);
 
 	fullness = fix_fullness_group(class, zspage);
@@ -1880,7 +1880,7 @@ static void async_free_zspage(struct work_struct *work)
 
 		class = zspage_class(pool, zspage);
 		spin_lock(&class->lock);
-		class_stat_dec(class, ZS_INUSE_RATIO_0, 1);
+		class_stat_sub(class, ZS_INUSE_RATIO_0, 1);
 		__free_zspage(pool, class, zspage);
 		spin_unlock(&class->lock);
 	}
@@ -1923,8 +1923,8 @@ static inline void zs_flush_migration(struct zs_pool *pool) { }
 static unsigned long zs_can_compact(struct size_class *class)
 {
 	unsigned long obj_wasted;
-	unsigned long obj_allocated = zs_stat_get(class, ZS_OBJS_ALLOCATED);
-	unsigned long obj_used = zs_stat_get(class, ZS_OBJS_INUSE);
+	unsigned long obj_allocated = class_stat_read(class, ZS_OBJS_ALLOCATED);
+	unsigned long obj_used = class_stat_read(class, ZS_OBJS_INUSE);
 
 	if (obj_allocated <= obj_used)
 		return 0;
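For context, these two reads feed zsmalloc's compaction estimate; past the
hunk boundary the function continues roughly as follows (unchanged by this
patch, reproduced from the surrounding kernel source; treat as a sketch):

	obj_wasted = obj_allocated - obj_used;		/* slack objects */
	obj_wasted /= class->objs_per_zspage;		/* whole zspages' worth of slack */

	return obj_wasted * class->pages_per_zspage;	/* pages compaction could free */
	}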