mm/migrate: Add folio_migrate_flags()

Turn migrate_page_states() into a wrapper around folio_migrate_flags().
Also convert two functions only called from folio_migrate_flags() to
be folio-based.  ksm_migrate_page() becomes folio_migrate_ksm() and
copy_page_owner() becomes folio_copy_owner().  folio_migrate_flags()
alone shrinks by two thirds -- 1967 bytes down to 642 bytes.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Author: Matthew Wilcox (Oracle)
Date:   2021-05-07 15:26:29 -04:00
parent 3417013e0d
commit 19138349ed
7 changed files with 77 additions and 67 deletions
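
The shape of the change is the usual page-to-folio conversion: the folio-taking
function does all the work, and the old page-taking name survives only as a
one-line wrapper that converts its arguments with page_folio().  Below is a
minimal, compilable sketch of that pattern; the struct layouts, the flag copy
and the toy page_folio() are simplified stand-ins for illustration, not the
kernel's definitions.

    #include <stdio.h>

    /* Hypothetical, simplified stand-ins for the kernel's struct page/folio. */
    struct page  { unsigned long flags; };
    struct folio { struct page page; };

    /* Toy page_folio(): in this sketch a folio is simply its head page. */
    static struct folio *page_folio(struct page *page)
    {
            return (struct folio *)page;
    }

    /* Folio-based core: this is where all the real work now lives. */
    static void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
    {
            newfolio->page.flags = folio->page.flags;   /* copy the flag bits */
    }

    /* Legacy entry point reduced to a one-line wrapper, as in the patch. */
    static void migrate_page_states(struct page *newpage, struct page *page)
    {
            folio_migrate_flags(page_folio(newpage), page_folio(page));
    }

    int main(void)
    {
            struct page src = { .flags = 0x5 };
            struct page dst = { .flags = 0 };

            migrate_page_states(&dst, &src);
            printf("flags copied: %#lx\n", dst.flags);
            return 0;
    }

Existing callers that still pass struct page pay only the page_folio()
conversion at the boundary; new callers can use the folio interface directly.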

include/linux/ksm.h

@@ -52,7 +52,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 			struct vm_area_struct *vma, unsigned long address);
 
 void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
-void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
 
 #else  /* !CONFIG_KSM */
 
@@ -83,7 +83,7 @@ static inline void rmap_walk_ksm(struct page *page,
 {
 }
 
-static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
 {
 }
 #endif /* CONFIG_MMU */

include/linux/migrate.h

@@ -57,6 +57,7 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 			struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page, int extra_count);
+void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
 int folio_migrate_mapping(struct address_space *mapping,
 		struct folio *newfolio, struct folio *folio, int extra_count);
 #else

include/linux/page_owner.h

@@ -12,7 +12,7 @@ extern void __reset_page_owner(struct page *page, unsigned int order);
 extern void __set_page_owner(struct page *page,
 			unsigned int order, gfp_t gfp_mask);
 extern void __split_page_owner(struct page *page, unsigned int nr);
-extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
+extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
 extern void __dump_page_owner(const struct page *page);
 extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@@ -36,10 +36,10 @@ static inline void split_page_owner(struct page *page, unsigned int nr)
 	if (static_branch_unlikely(&page_owner_inited))
 		__split_page_owner(page, nr);
 }
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
 {
 	if (static_branch_unlikely(&page_owner_inited))
-		__copy_page_owner(oldpage, newpage);
+		__folio_copy_owner(newfolio, old);
 }
 static inline void set_page_owner_migrate_reason(struct page *page, int reason)
 {
@@ -63,7 +63,7 @@ static inline void split_page_owner(struct page *page,
 			unsigned int order)
 {
 }
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
 {
 }
 static inline void set_page_owner_migrate_reason(struct page *page, int reason)

mm/folio-compat.c

@@ -58,4 +58,10 @@ int migrate_page_move_mapping(struct address_space *mapping,
 			page_folio(page), extra_count);
 }
 EXPORT_SYMBOL(migrate_page_move_mapping);
+
+void migrate_page_states(struct page *newpage, struct page *page)
+{
+	folio_migrate_flags(page_folio(newpage), page_folio(page));
+}
+EXPORT_SYMBOL(migrate_page_states);
 #endif

mm/ksm.c

@@ -751,7 +751,7 @@ static struct page *get_ksm_page(struct stable_node *stable_node,
 		/*
 		 * We come here from above when page->mapping or !PageSwapCache
 		 * suggests that the node is stale; but it might be under migration.
-		 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
+		 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
 		 * before checking whether node->kpfn has been changed.
 		 */
 		smp_rmb();
@@ -852,9 +852,14 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma,
 	return err;
 }
 
+static inline struct stable_node *folio_stable_node(struct folio *folio)
+{
+	return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
+}
+
 static inline struct stable_node *page_stable_node(struct page *page)
 {
-	return PageKsm(page) ? page_rmapping(page) : NULL;
+	return folio_stable_node(page_folio(page));
 }
 
 static inline void set_page_stable_node(struct page *page,
@@ -2659,26 +2664,26 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 }
 
 #ifdef CONFIG_MIGRATION
-void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
 {
 	struct stable_node *stable_node;
 
-	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
-	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
-	VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio);
+	VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio);
 
-	stable_node = page_stable_node(newpage);
+	stable_node = folio_stable_node(folio);
 	if (stable_node) {
-		VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
-		stable_node->kpfn = page_to_pfn(newpage);
+		VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
+		stable_node->kpfn = folio_pfn(newfolio);
 		/*
-		 * newpage->mapping was set in advance; now we need smp_wmb()
+		 * newfolio->mapping was set in advance; now we need smp_wmb()
 		 * to make sure that the new stable_node->kpfn is visible
-		 * to get_ksm_page() before it can see that oldpage->mapping
-		 * has gone stale (or that PageSwapCache has been cleared).
+		 * to get_ksm_page() before it can see that folio->mapping
+		 * has gone stale (or that folio_test_swapcache has been cleared).
		 */
 		smp_wmb();
-		set_page_stable_node(oldpage, NULL);
+		set_page_stable_node(&folio->page, NULL);
 	}
 }
 #endif /* CONFIG_MIGRATION */

mm/migrate.c

@@ -539,82 +539,80 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 }
 
 /*
- * Copy the page to its new location
+ * Copy the flags and some other ancillary information
  */
-void migrate_page_states(struct page *newpage, struct page *page)
+void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-	struct folio *newfolio = page_folio(newpage);
 	int cpupid;
 
-	if (PageError(page))
-		SetPageError(newpage);
-	if (PageReferenced(page))
-		SetPageReferenced(newpage);
-	if (PageUptodate(page))
-		SetPageUptodate(newpage);
-	if (TestClearPageActive(page)) {
-		VM_BUG_ON_PAGE(PageUnevictable(page), page);
-		SetPageActive(newpage);
-	} else if (TestClearPageUnevictable(page))
-		SetPageUnevictable(newpage);
-	if (PageWorkingset(page))
-		SetPageWorkingset(newpage);
-	if (PageChecked(page))
-		SetPageChecked(newpage);
-	if (PageMappedToDisk(page))
-		SetPageMappedToDisk(newpage);
+	if (folio_test_error(folio))
+		folio_set_error(newfolio);
+	if (folio_test_referenced(folio))
+		folio_set_referenced(newfolio);
+	if (folio_test_uptodate(folio))
+		folio_mark_uptodate(newfolio);
+	if (folio_test_clear_active(folio)) {
+		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
+		folio_set_active(newfolio);
+	} else if (folio_test_clear_unevictable(folio))
+		folio_set_unevictable(newfolio);
+	if (folio_test_workingset(folio))
+		folio_set_workingset(newfolio);
+	if (folio_test_checked(folio))
+		folio_set_checked(newfolio);
+	if (folio_test_mappedtodisk(folio))
+		folio_set_mappedtodisk(newfolio);
 
 	/* Move dirty on pages not done by folio_migrate_mapping() */
-	if (PageDirty(page))
-		SetPageDirty(newpage);
+	if (folio_test_dirty(folio))
+		folio_set_dirty(newfolio);
 
-	if (page_is_young(page))
-		set_page_young(newpage);
-	if (page_is_idle(page))
-		set_page_idle(newpage);
+	if (folio_test_young(folio))
+		folio_set_young(newfolio);
+	if (folio_test_idle(folio))
+		folio_set_idle(newfolio);
 
 	/*
 	 * Copy NUMA information to the new page, to prevent over-eager
 	 * future migrations of this same page.
 	 */
-	cpupid = page_cpupid_xchg_last(page, -1);
-	page_cpupid_xchg_last(newpage, cpupid);
+	cpupid = page_cpupid_xchg_last(&folio->page, -1);
+	page_cpupid_xchg_last(&newfolio->page, cpupid);
 
-	ksm_migrate_page(newpage, page);
+	folio_migrate_ksm(newfolio, folio);
 	/*
 	 * Please do not reorder this without considering how mm/ksm.c's
 	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
 	 */
-	if (PageSwapCache(page))
-		ClearPageSwapCache(page);
-	ClearPagePrivate(page);
+	if (folio_test_swapcache(folio))
+		folio_clear_swapcache(folio);
+	folio_clear_private(folio);
 
 	/* page->private contains hugetlb specific flags */
-	if (!PageHuge(page))
-		set_page_private(page, 0);
+	if (!folio_test_hugetlb(folio))
+		folio->private = NULL;
 
 	/*
 	 * If any waiters have accumulated on the new page then
 	 * wake them up.
 	 */
-	if (PageWriteback(newpage))
-		end_page_writeback(newpage);
+	if (folio_test_writeback(newfolio))
+		folio_end_writeback(newfolio);
 
 	/*
 	 * PG_readahead shares the same bit with PG_reclaim. The above
 	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
 	 * bit after that.
 	 */
-	if (PageReadahead(page))
-		SetPageReadahead(newpage);
+	if (folio_test_readahead(folio))
+		folio_set_readahead(newfolio);
 
-	copy_page_owner(page, newpage);
+	folio_copy_owner(newfolio, folio);
 
-	if (!PageHuge(page))
+	if (!folio_test_hugetlb(folio))
 		mem_cgroup_migrate(folio, newfolio);
 }
-EXPORT_SYMBOL(migrate_page_states);
+EXPORT_SYMBOL(folio_migrate_flags);
 
 void migrate_page_copy(struct page *newpage, struct page *page)
 {
@@ -655,7 +653,7 @@ int migrate_page(struct address_space *mapping,
 	if (mode != MIGRATE_SYNC_NO_COPY)
 		migrate_page_copy(newpage, page);
 	else
-		migrate_page_states(newpage, page);
+		folio_migrate_flags(newfolio, folio);
 	return MIGRATEPAGE_SUCCESS;
 }
 EXPORT_SYMBOL(migrate_page);

mm/page_owner.c

@@ -210,10 +210,10 @@ void __split_page_owner(struct page *page, unsigned int nr)
 	}
 }
 
-void __copy_page_owner(struct page *oldpage, struct page *newpage)
+void __folio_copy_owner(struct folio *newfolio, struct folio *old)
 {
-	struct page_ext *old_ext = lookup_page_ext(oldpage);
-	struct page_ext *new_ext = lookup_page_ext(newpage);
+	struct page_ext *old_ext = lookup_page_ext(&old->page);
+	struct page_ext *new_ext = lookup_page_ext(&newfolio->page);
 	struct page_owner *old_page_owner, *new_page_owner;
 
 	if (unlikely(!old_ext || !new_ext))
@@ -231,11 +231,11 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 	new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
 
 	/*
-	 * We don't clear the bit on the oldpage as it's going to be freed
+	 * We don't clear the bit on the old folio as it's going to be freed
 	 * after migration. Until then, the info can be useful in case of
 	 * a bug, and the overall stats will be off a bit only temporarily.
 	 * Also, migrate_misplaced_transhuge_page() can still fail the
-	 * migration and then we want the oldpage to retain the info. But
+	 * migration and then we want the old folio to retain the info. But
 	 * in that case we also don't need to explicitly clear the info from
 	 * the new page, which will be freed.
 	 */