mm: add per-order mTHP swpin counters

This helps profile the sizes of folios being swapped in. Currently,
only mTHP swap-out is being counted.
The new interface can be found at:
/sys/kernel/mm/transparent_hugepage/hugepages-<size>/stats
         swpin
For example,
cat /sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats/swpin
12809
cat /sys/kernel/mm/transparent_hugepage/hugepages-32kB/stats/swpin
4763
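For quick inspection from userspace, the per-size counters can be read in one pass. The helper below is only an illustrative sketch (not part of this patch); it assumes a glibc userspace and the sysfs layout shown above.

/*
 * Illustrative userspace sketch (not part of this patch): print every
 * per-order mTHP swpin counter exposed under
 * /sys/kernel/mm/transparent_hugepage/hugepages-<size>/stats/swpin.
 */
#include <glob.h>
#include <stdio.h>

int main(void)
{
        glob_t g;
        size_t i;

        if (glob("/sys/kernel/mm/transparent_hugepage/hugepages-*/stats/swpin",
                 0, NULL, &g))
                return 1;

        for (i = 0; i < g.gl_pathc; i++) {
                FILE *f = fopen(g.gl_pathv[i], "r");
                unsigned long val;

                if (!f)
                        continue;
                if (fscanf(f, "%lu", &val) == 1)
                        printf("%s: %lu\n", g.gl_pathv[i], val);
                fclose(f);
        }
        globfree(&g);
        return 0;
}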

[v-songbaohua@oppo.com: add a blank line in doc]
  Link: https://lkml.kernel.org/r/20241030233423.80759-1-21cnbao@gmail.com
Link: https://lkml.kernel.org/r/20241026082423.26298-1-21cnbao@gmail.com
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Kanchana P Sridhar <kanchana.p.sridhar@intel.com>
Cc: Usama Arif <usamaarif642@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author: Barry Song, 2024-10-26 21:24:23 +13:00; committed by Andrew Morton
commit aaf2914aec, parent ed882add6d
4 changed files with 11 additions and 0 deletions

Documentation/admin-guide/mm/transhuge.rst

@@ -534,6 +534,10 @@ zswpout
        is incremented every time a huge page is swapped out to zswap in one
        piece without splitting.

swpin
        is incremented every time a huge page is swapped in from a non-zswap
        swap device in one piece.

swpout
        is incremented every time a huge page is swapped out to a non-zswap
        swap device in one piece without splitting.
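Note that each swpin increment accounts one whole folio, not its base pages: with a 4 KiB base page size (an assumption for this example), the hugepages-64kB value of 12809 shown in the commit message corresponds to 12809 × 16 = 204944 base pages swapped in as 64 kB folios.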

include/linux/huge_mm.h

@@ -120,6 +120,7 @@ enum mthp_stat_item {
MTHP_STAT_ANON_FAULT_FALLBACK,
MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
MTHP_STAT_ZSWPOUT,
MTHP_STAT_SWPIN,
MTHP_STAT_SWPOUT,
MTHP_STAT_SWPOUT_FALLBACK,
MTHP_STAT_SHMEM_ALLOC,
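For context, MTHP_STAT_SWPIN simply becomes another index into the per-CPU, per-order statistics array shared by the existing counters. A simplified sketch of the recording helper, loosely modelled on the upstream count_mthp_stat() in this header (details may differ), looks like:

/* Simplified sketch only; the real helper in huge_mm.h may differ. */
static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
        /* Only orders up to PMD_ORDER have per-order counters. */
        if (order <= 0 || order > PMD_ORDER)
                return;

        this_cpu_inc(mthp_stats.stats[order][item]);
}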

mm/huge_memory.c

@@ -616,6 +616,7 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT);
DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN);
DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT);
DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
#ifdef CONFIG_SHMEM
@@ -635,6 +636,7 @@ static struct attribute *anon_stats_attrs[] = {
&anon_fault_fallback_charge_attr.attr,
#ifndef CONFIG_SHMEM
&zswpout_attr.attr,
&swpin_attr.attr,
&swpout_attr.attr,
&swpout_fallback_attr.attr,
#endif
@@ -666,6 +668,7 @@ static struct attribute_group file_stats_attr_grp = {
static struct attribute *any_stats_attrs[] = {
#ifdef CONFIG_SHMEM
&zswpout_attr.attr,
&swpin_attr.attr,
&swpout_attr.attr,
&swpout_fallback_attr.attr,
#endif
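DEFINE_MTHP_STAT_ATTR() generates the sysfs read side for each counter, so hugepages-<size>/stats/swpin reports the sum of the per-CPU counts for that folio order. Roughly, and assuming helpers along the lines of the upstream to_thpsize() and sum_mthp_stat() (abridged here, details may differ), the swpin instance expands to:

/* Rough expansion sketch of DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN);
 * helper names follow the upstream code but are abridged here. */
static ssize_t swpin_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        int order = to_thpsize(kobj)->order;

        /* Sum this order's MTHP_STAT_SWPIN count across all CPUs. */
        return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, MTHP_STAT_SWPIN));
}
static struct kobj_attribute swpin_attr = __ATTR_RO(swpin);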

mm/page_io.c

@@ -495,6 +495,7 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
for (p = 0; p < sio->pages; p++) {
struct folio *folio = page_folio(sio->bvec[p].bv_page);
count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
folio_mark_uptodate(folio);
folio_unlock(folio);
@@ -589,6 +590,7 @@ static void swap_read_folio_bdev_sync(struct folio *folio,
* attempt to access it in the page fault retry time check.
*/
get_task_struct(current);
count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
count_vm_events(PSWPIN, folio_nr_pages(folio));
submit_bio_wait(&bio);
@@ -605,6 +607,7 @@ static void swap_read_folio_bdev_async(struct folio *folio,
bio->bi_iter.bi_sector = swap_folio_sector(folio);
bio->bi_end_io = end_swap_bio_read;
bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
count_mthp_stat(folio_order(folio), MTHP_STAT_SWPIN);
count_memcg_folio_events(folio, PSWPIN, folio_nr_pages(folio));
count_vm_events(PSWPIN, folio_nr_pages(folio));
submit_bio(bio);