Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2024-12-29 17:22:07 +00:00)
mm: shmem: fix ShmemHugePages at swapout
/proc/meminfo ShmemHugePages has been showing overlarge amounts (more than
Shmem) after swapping out THPs: we forgot to update NR_SHMEM_THPS.
Add shmem_update_stats(), to avoid repetition, and risk of making that
mistake again: the call from shmem_delete_from_page_cache() is the bugfix;
the call from shmem_replace_folio() is reassuring, but not really a bugfix
(replace corrects misplaced swapin readahead, but huge swapin readahead
would be a mistake).
Link: https://lkml.kernel.org/r/5ba477c8-a569-70b5-923e-09ab221af45b@google.com
Fixes: 809bc86517 ("mm: shmem: support large folio swap out")
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
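
The symptom is visible directly in /proc/meminfo: after shmem THPs are swapped out, ShmemHugePages can exceed Shmem. Below is a minimal userspace sketch (not part of the patch) that reads the two fields and flags the inconsistency; field names are as reported by /proc/meminfo.

/*
 * Sketch only, not part of the patch: warn if /proc/meminfo's
 * ShmemHugePages exceeds Shmem, the inconsistency described above.
 */
#include <stdio.h>
#include <string.h>

static long meminfo_kb(const char *field)
{
	char line[256];
	long kb = -1;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f)) {
		/* Match "Field:" exactly, so "Shmem" does not match "ShmemHugePages". */
		if (!strncmp(line, field, strlen(field)) &&
		    line[strlen(field)] == ':') {
			sscanf(line + strlen(field) + 1, "%ld", &kb);
			break;
		}
	}
	fclose(f);
	return kb;
}

int main(void)
{
	long shmem = meminfo_kb("Shmem");
	long shmem_thp = meminfo_kb("ShmemHugePages");

	printf("Shmem: %ld kB, ShmemHugePages: %ld kB\n", shmem, shmem_thp);
	if (shmem >= 0 && shmem_thp > shmem)
		printf("ShmemHugePages exceeds Shmem: NR_SHMEM_THPS looks stale\n");
	return 0;
}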
commit dad2dc9c92
parent 7782e3b3b0
 mm/shmem.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -787,6 +787,14 @@ static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+static void shmem_update_stats(struct folio *folio, int nr_pages)
+{
+	if (folio_test_pmd_mappable(folio))
+		__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
+	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
+	__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
+}
+
 /*
  * Somewhat like filemap_add_folio, but error if expected item has gone.
  */
@@ -821,10 +829,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
 		xas_store(&xas, folio);
 		if (xas_error(&xas))
 			goto unlock;
-		if (folio_test_pmd_mappable(folio))
-			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
-		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
-		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr);
+		shmem_update_stats(folio, nr);
 		mapping->nrpages += nr;
 unlock:
 		xas_unlock_irq(&xas);
@@ -852,8 +857,7 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
 	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
 	folio->mapping = NULL;
 	mapping->nrpages -= nr;
-	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
-	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
+	shmem_update_stats(folio, -nr);
 	xa_unlock_irq(&mapping->i_pages);
 	folio_put_refs(folio, nr);
 	BUG_ON(error);
@@ -1969,10 +1973,8 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
 	}
 	if (!error) {
 		mem_cgroup_replace_folio(old, new);
-		__lruvec_stat_mod_folio(new, NR_FILE_PAGES, nr_pages);
-		__lruvec_stat_mod_folio(new, NR_SHMEM, nr_pages);
-		__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -nr_pages);
-		__lruvec_stat_mod_folio(old, NR_SHMEM, -nr_pages);
+		shmem_update_stats(new, nr_pages);
+		shmem_update_stats(old, -nr_pages);
 	}
 	xa_unlock_irq(&swap_mapping->i_pages);
 
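
For reference, a rough way to exercise the swapout path in question, sketched under stated assumptions: swap is enabled, tmpfs is mounted with huge=always at the hypothetical path /mnt/shmem-huge, and MADV_PAGEOUT actually reclaims the range. Before the fix, ShmemHugePages in /proc/meminfo stays elevated after the pageout; after it, the counter drops along with Shmem.

/*
 * Reproduction sketch (assumptions: swap enabled; tmpfs mounted with
 * huge=always at the hypothetical path /mnt/shmem-huge).  Fault in a
 * 2MB shmem mapping, then ask for reclaim with MADV_PAGEOUT to drive
 * the shmem swapout path, and compare /proc/meminfo while paused.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT 21		/* from the kernel UAPI, for older libcs */
#endif

#define SZ_2M (2UL << 20)

int main(void)
{
	int fd = open("/mnt/shmem-huge/thp-test", O_CREAT | O_RDWR, 0600);
	char *p;

	if (fd < 0 || ftruncate(fd, SZ_2M) < 0)
		return 1;
	p = mmap(NULL, SZ_2M, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 0x5a, SZ_2M);		/* fault in, ideally as one shmem THP */

	/* Request reclaim of the range: drives swapout of the shmem folio. */
	if (madvise(p, SZ_2M, MADV_PAGEOUT))
		perror("madvise(MADV_PAGEOUT)");

	/* Now compare ShmemHugePages vs Shmem in /proc/meminfo. */
	pause();
	return 0;
}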