Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-04 04:06:26 +00:00).
memcg: replace memcg ID idr with xarray
At the moment memcg IDs are managed through IDR, which requires external synchronization mechanisms and makes the allocation code a bit awkward. Let's switch to xarray and make the code simpler.

[shakeel.butt@linux.dev: fix error path in mem_cgroup_alloc(), per Dan]

Link: https://lkml.kernel.org/r/20240815155402.3630804-1-shakeel.butt@linux.dev
Link: https://lkml.kernel.org/r/20240809172618.2946790-1-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
072cd213b7
commit
0722237191
@ -3363,29 +3363,12 @@ static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
|
||||
*/
|
||||
|
||||
#define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
|
||||
static DEFINE_IDR(mem_cgroup_idr);
|
||||
static DEFINE_SPINLOCK(memcg_idr_lock);
|
||||
|
||||
static int mem_cgroup_alloc_id(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
idr_preload(GFP_KERNEL);
|
||||
spin_lock(&memcg_idr_lock);
|
||||
ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
|
||||
GFP_NOWAIT);
|
||||
spin_unlock(&memcg_idr_lock);
|
||||
idr_preload_end();
|
||||
return ret;
|
||||
}
|
||||
static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
|
||||
|
||||
static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
|
||||
{
|
||||
if (memcg->id.id > 0) {
|
||||
spin_lock(&memcg_idr_lock);
|
||||
idr_remove(&mem_cgroup_idr, memcg->id.id);
|
||||
spin_unlock(&memcg_idr_lock);
|
||||
|
||||
xa_erase(&mem_cgroup_ids, memcg->id.id);
|
||||
memcg->id.id = 0;
|
||||
}
|
||||
}
|
||||
@ -3420,7 +3403,7 @@ static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
|
||||
struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
|
||||
{
|
||||
WARN_ON_ONCE(!rcu_read_lock_held());
|
||||
return idr_find(&mem_cgroup_idr, id);
|
||||
return xa_load(&mem_cgroup_ids, id);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SHRINKER_DEBUG
|
||||
@ -3513,17 +3496,17 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
|
||||
struct mem_cgroup *memcg;
|
||||
int node, cpu;
|
||||
int __maybe_unused i;
|
||||
long error = -ENOMEM;
|
||||
long error;
|
||||
|
||||
memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
|
||||
if (!memcg)
|
||||
return ERR_PTR(error);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
memcg->id.id = mem_cgroup_alloc_id();
|
||||
if (memcg->id.id < 0) {
|
||||
error = memcg->id.id;
|
||||
error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
|
||||
XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
|
||||
if (error)
|
||||
goto fail;
|
||||
}
|
||||
error = -ENOMEM;
|
||||
|
||||
memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
|
||||
GFP_KERNEL_ACCOUNT);
|
||||
@ -3664,9 +3647,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
|
||||
* publish it here at the end of onlining. This matches the
|
||||
* regular ID destruction during offlining.
|
||||
*/
|
||||
spin_lock(&memcg_idr_lock);
|
||||
idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
|
||||
spin_unlock(&memcg_idr_lock);
|
||||
xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
|
||||
|
||||
return 0;
|
||||
offline_kmem:
|
||||
|
Loading…
Reference in New Issue
Block a user