memcg/hugetlb: introduce mem_cgroup_charge_hugetlb

This patch introduces mem_cgroup_charge_hugetlb(), which combines the logic
of mem_cgroup_hugetlb_try_charge() / mem_cgroup_commit_charge() and removes
the need for mem_cgroup_cancel_charge() in the hugetlb allocation path.  It
also reduces the footprint of memcg in hugetlb code and consolidates all
memcg-related error paths into one.

Link: https://lkml.kernel.org/r/20241211203951.764733-3-joshua.hahnjy@gmail.com
Signed-off-by: Joshua Hahn <joshua.hahnjy@gmail.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
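
Illustrative sketch (not part of the patch): the caller-side change in
alloc_hugetlb_folio(), reduced to just the memcg calls. The old flow needed a
try/commit pair plus an unwind (cancel + put) on every error path; the new
helper is called once the folio exists, so a single error path remains.
Identifiers are taken from the diff below; the surrounding hugetlb logic is
omitted.

	/* Old flow: charge up front, commit later, unwind on every failure. */
	memcg = get_mem_cgroup_from_current();
	memcg_charge_ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
	if (memcg_charge_ret == -ENOMEM) {
		mem_cgroup_put(memcg);
		return ERR_PTR(-ENOMEM);
	}
	/* ... allocate the folio; every error path must also call ... */
	/* ... mem_cgroup_cancel_charge() and mem_cgroup_put() ...     */
	if (!memcg_charge_ret)
		mem_cgroup_commit_charge(folio, memcg);
	mem_cgroup_put(memcg);

	/* New flow: one call after the folio is allocated, one error path. */
	ret = mem_cgroup_charge_hugetlb(folio, gfp);
	if (ret == -ENOMEM) {
		free_huge_folio(folio);
		return ERR_PTR(-ENOMEM);
	}

The new helper acquires and releases the memcg reference internally, which is
what lets the caller drop get_mem_cgroup_from_current() / mem_cgroup_put()
entirely.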

--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h

@@ -649,6 +649,8 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
 int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
 		long nr_pages);
 
+int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp);
+
 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
 		gfp_t gfp, swp_entry_t entry);
 
@@ -1169,6 +1171,11 @@ static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg,
 	return 0;
 }
 
+static inline int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp)
+{
+	return 0;
+}
+
 static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
 		struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
 {

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c

@@ -2981,21 +2981,13 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 	struct hugepage_subpool *spool = subpool_vma(vma);
 	struct hstate *h = hstate_vma(vma);
 	struct folio *folio;
-	long map_chg, map_commit, nr_pages = pages_per_huge_page(h);
+	long map_chg, map_commit;
 	long gbl_chg;
-	int memcg_charge_ret, ret, idx;
+	int ret, idx;
 	struct hugetlb_cgroup *h_cg = NULL;
-	struct mem_cgroup *memcg;
 	bool deferred_reserve;
 	gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
 
-	memcg = get_mem_cgroup_from_current();
-	memcg_charge_ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
-	if (memcg_charge_ret == -ENOMEM) {
-		mem_cgroup_put(memcg);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	idx = hstate_index(h);
 	/*
 	 * Examine the region/reserve map to determine if the process
@@ -3003,12 +2995,8 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 	 * code of zero indicates a reservation exists (no change).
 	 */
 	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
-	if (map_chg < 0) {
-		if (!memcg_charge_ret)
-			mem_cgroup_cancel_charge(memcg, nr_pages);
-		mem_cgroup_put(memcg);
+	if (map_chg < 0)
 		return ERR_PTR(-ENOMEM);
-	}
 
 	/*
 	 * Processes that did not create the mapping will have no
@@ -3106,10 +3094,18 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 		}
 	}
 
-	if (!memcg_charge_ret)
-		mem_cgroup_commit_charge(folio, memcg);
+	ret = mem_cgroup_charge_hugetlb(folio, gfp);
+	/*
+	 * Unconditionally increment NR_HUGETLB here. If it turns out that
+	 * mem_cgroup_charge_hugetlb failed, then immediately free the page and
+	 * decrement NR_HUGETLB.
+	 */
 	lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h));
-	mem_cgroup_put(memcg);
+
+	if (ret == -ENOMEM) {
+		free_huge_folio(folio);
+		return ERR_PTR(-ENOMEM);
+	}
 
 	return folio;
 
@@ -3124,9 +3120,6 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 		hugepage_subpool_put_pages(spool, 1);
 out_end_reservation:
 	vma_end_reservation(h, vma, addr);
-	if (!memcg_charge_ret)
-		mem_cgroup_cancel_charge(memcg, nr_pages);
-	mem_cgroup_put(memcg);
 	return ERR_PTR(-ENOSPC);
 }
 

--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c

@@ -4561,6 +4561,40 @@ int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
 	return 0;
 }
 
+/**
+ * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
+ * @folio: folio being charged
+ * @gfp: reclaim mode
+ *
+ * This function is called when allocating a huge page folio, after the page has
+ * already been obtained and charged to the appropriate hugetlb cgroup
+ * controller (if it is enabled).
+ *
+ * Returns ENOMEM if the memcg is already full.
+ * Returns 0 if either the charge was successful, or if we skip the charging.
+ */
+int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp)
+{
+	struct mem_cgroup *memcg = get_mem_cgroup_from_current();
+	int ret = 0;
+
+	/*
+	 * Even memcg does not account for hugetlb, we still want to update
+	 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip
+	 * charging the memcg.
+	 */
+	if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() ||
+	    !memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
+		goto out;
+
+	if (charge_memcg(folio, memcg, gfp))
+		ret = -ENOMEM;
+
+out:
+	mem_cgroup_put(memcg);
+	return ret;
+}
+
 /**
  * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
  * @folio: folio to charge.