mm/codetag: add pgalloc_tag_copy()

Add pgalloc_tag_copy() to transfer the codetag from the old folio to the
new one during migration.  This makes the original allocation sites
persist across migration rather than being lumped into the get_new_folio
callbacks passed into migrate_pages(), e.g., compaction_alloc():

  # echo 1 >/proc/sys/vm/compact_memory
  # grep compaction_alloc /proc/allocinfo

Before this patch:
  132968448  32463  mm/compaction.c:1880 func:compaction_alloc

After this patch:
          0      0  mm/compaction.c:1880 func:compaction_alloc

Link: https://lkml.kernel.org/r/20240906042108.1150526-3-yuzhao@google.com
Fixes: dcfe378c81 ("lib: introduce support for page allocation tagging")
Signed-off-by: Yu Zhao <yuzhao@google.com>
Acked-by: Suren Baghdasaryan <surenb@google.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
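
For orientation before the diffs: migrate_pages() obtains each destination
folio from a get_new_folio callback, so under memory allocation profiling
the destination pages were charged to that callback's allocation site.  A
minimal sketch of the callback shape, for illustration only (the
hypothetical example_get_new_folio() stands in for the real
compaction_alloc() in mm/compaction.c, which does far more bookkeeping):

  /*
   * Sketch only: the destination folio is allocated here, so without
   * pgalloc_tag_copy() its codetag points at this allocation site
   * rather than at the code that allocated the source folio.
   */
  static struct folio *example_get_new_folio(struct folio *src, unsigned long private)
  {
          return folio_alloc(GFP_KERNEL, folio_order(src));
  }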

--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h

@@ -137,7 +137,16 @@ static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
 /* Caller should verify both ref and tag to be valid */
 static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
 {
+	alloc_tag_add_check(ref, tag);
+	if (!ref || !tag)
+		return;
+
 	ref->ct = &tag->ct;
+}
+
+static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
+{
+	__alloc_tag_ref_set(ref, tag);
 	/*
 	 * We need in increment the call counter every time we have a new
 	 * allocation or when we split a large allocation into smaller ones.
@@ -147,22 +156,9 @@ static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
 	this_cpu_inc(tag->counters->calls);
 }
 
-static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
-{
-	alloc_tag_add_check(ref, tag);
-	if (!ref || !tag)
-		return;
-
-	__alloc_tag_ref_set(ref, tag);
-}
-
 static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
 {
-	alloc_tag_add_check(ref, tag);
-	if (!ref || !tag)
-		return;
-
-	__alloc_tag_ref_set(ref, tag);
+	alloc_tag_ref_set(ref, tag);
 	this_cpu_add(tag->counters->bytes, bytes);
 }
 
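
Net effect of the two hunks above: __alloc_tag_ref_set() now validates its
arguments and only points the ref at the tag, while alloc_tag_ref_set()
additionally bumps the per-CPU calls counter (and alloc_tag_add() routes
through it).  pgalloc_tag_copy() in the next file relies on the bare
variant, because a migrated allocation was already counted once at its
original site.  A minimal sketch of the resulting contract, with a
hypothetical caller:

  /* Hypothetical caller, for illustration only. */
  static void example_set_ref(union codetag_ref *ref, struct alloc_tag *tag,
                              bool already_counted)
  {
          if (already_counted)
                  __alloc_tag_ref_set(ref, tag);  /* transfer only, no calls++ */
          else
                  alloc_tag_ref_set(ref, tag);    /* new allocation: calls++ */
  }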

--- a/include/linux/mm.h
+++ b/include/linux/mm.h

@@ -4108,10 +4108,37 @@ static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
 		}
 	}
 }
+
+static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+{
+	struct alloc_tag *tag;
+	union codetag_ref *ref;
+
+	tag = pgalloc_tag_get(&old->page);
+	if (!tag)
+		return;
+
+	ref = get_page_tag_ref(&new->page);
+	if (!ref)
+		return;
+
+	/* Clear the old ref to the original allocation tag. */
+	clear_page_tag_ref(&old->page);
+	/* Decrement the counters of the tag on get_new_folio. */
+	alloc_tag_sub(ref, folio_nr_pages(new));
+
+	__alloc_tag_ref_set(ref, tag);
+
+	put_page_tag_ref(ref);
+}
 #else /* !CONFIG_MEM_ALLOC_PROFILING */
 static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
 {
 }
+
+static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+{
+}
 #endif /* CONFIG_MEM_ALLOC_PROFILING */
 
 #endif /* _LINUX_MM_H */
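
The bookkeeping pgalloc_tag_copy() performs can be checked in isolation.
Below is a stand-alone user-space toy model, with all names invented for
illustration (this is not kernel API), that reproduces the before/after
numbers from the commit message: the get_new_folio callback's tag drops
back to zero while the original site keeps its single counted call:

  #include <stdio.h>

  struct toy_tag {
          const char *site;
          long bytes, calls;
  };

  int main(void)
  {
          struct toy_tag orig = { "original allocation site", 4096, 1 };
          struct toy_tag cb = { "mm/compaction.c:1880 func:compaction_alloc", 4096, 1 };
          struct toy_tag *ref = &cb;  /* destination folio charged to the callback */

          /* alloc_tag_sub() equivalent: uncharge the get_new_folio callback... */
          ref->bytes -= 4096;
          ref->calls -= 1;

          /* __alloc_tag_ref_set() equivalent: ...then transfer the original
           * tag without recounting; the call was already counted once. */
          ref = &orig;

          printf("%s: bytes=%ld calls=%ld\n", cb.site, cb.bytes, cb.calls);
          printf("%s: bytes=%ld calls=%ld\n", ref->site, ref->bytes, ref->calls);
          return 0;
  }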

--- a/mm/migrate.c
+++ b/mm/migrate.c

@@ -743,6 +743,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 		folio_set_readahead(newfolio);
 
 	folio_copy_owner(newfolio, folio);
+	pgalloc_tag_copy(newfolio, folio);
 
 	mem_cgroup_migrate(folio, newfolio);
 }