mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
mm: memcg: rename memcg_check_events()
Rename memcg_check_events() into memcg1_check_events() for consistency with other cgroup v1-specific functions.

Link: https://lkml.kernel.org/r/20240625005906.106920-8-roman.gushchin@linux.dev
Signed-off-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit cc7b8504f6
parent 66d60c428b
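For orientation, here is a minimal user-space sketch (not code from the patch; the struct and helpers below are stand-ins for the real kernel ones) of the call-site pattern the rename touches in the hunks that follow: charge statistics are updated and the cgroup v1-only event check now goes through memcg1_check_events().

#include <stdio.h>

/* Stand-in type; the real struct mem_cgroup lives in the kernel memcg code. */
struct mem_cgroup {
	long nr_page_events;
};

/* Stand-in for the kernel helper of the same name. */
static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages)
{
	memcg->nr_page_events += nr_pages;
}

/* Renamed from memcg_check_events(): the cgroup v1-only event check. */
static void memcg1_check_events(struct mem_cgroup *memcg, int nid)
{
	printf("v1 event check: node %d, %ld page events\n",
	       nid, memcg->nr_page_events);
}

int main(void)
{
	struct mem_cgroup to = { .nr_page_events = 0 };

	/* Mirrors the call-site pattern in the hunks below:
	 * update statistics, then run the v1 event check. */
	mem_cgroup_charge_statistics(&to, 1);
	memcg1_check_events(&to, 0);
	return 0;
}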
@@ -835,9 +835,9 @@ static int mem_cgroup_move_account(struct folio *folio,
 
 	local_irq_disable();
 	mem_cgroup_charge_statistics(to, nr_pages);
-	memcg_check_events(to, nid);
+	memcg1_check_events(to, nid);
 	mem_cgroup_charge_statistics(from, -nr_pages);
-	memcg_check_events(from, nid);
+	memcg1_check_events(from, nid);
 	local_irq_enable();
 out:
 	return ret;
@@ -1424,7 +1424,7 @@ static void mem_cgroup_threshold(struct mem_cgroup *memcg)
  * Check events in order.
  *
  */
-void memcg_check_events(struct mem_cgroup *memcg, int nid)
+void memcg1_check_events(struct mem_cgroup *memcg, int nid)
 {
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		return;
@@ -12,7 +12,7 @@ static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
 }
 
 void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
-void memcg_check_events(struct mem_cgroup *memcg, int nid);
+void memcg1_check_events(struct mem_cgroup *memcg, int nid);
 void memcg_oom_recover(struct mem_cgroup *memcg);
 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 		     unsigned int nr_pages);
@@ -2630,7 +2630,7 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 
 	local_irq_disable();
 	mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
-	memcg_check_events(memcg, folio_nid(folio));
+	memcg1_check_events(memcg, folio_nid(folio));
 	local_irq_enable();
 }
 
@@ -5662,7 +5662,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 	local_irq_save(flags);
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
-	memcg_check_events(ug->memcg, ug->nid);
+	memcg1_check_events(ug->memcg, ug->nid);
 	local_irq_restore(flags);
 
 	/* drop reference from uncharge_folio */
@@ -5801,7 +5801,7 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
 
 	local_irq_save(flags);
 	mem_cgroup_charge_statistics(memcg, nr_pages);
-	memcg_check_events(memcg, folio_nid(new));
+	memcg1_check_events(memcg, folio_nid(new));
 	local_irq_restore(flags);
 }
 
@@ -6070,7 +6070,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
 	memcg_stats_lock();
 	mem_cgroup_charge_statistics(memcg, -nr_entries);
 	memcg_stats_unlock();
-	memcg_check_events(memcg, folio_nid(folio));
+	memcg1_check_events(memcg, folio_nid(folio));
 
 	css_put(&memcg->css);
 }