mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-28 16:52:18 +00:00
46d44d09d2
It adds a new_order parameter to set new page order in page owner. It prepares for upcoming changes to support split huge page to any lower order. Link: https://lkml.kernel.org/r/20240226205534.1603748-7-zi.yan@sent.com Signed-off-by: Zi Yan <ziy@nvidia.com> Cc: David Hildenbrand <david@redhat.com> Acked-by: David Hildenbrand <david@redhat.com> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Luis Chamberlain <mcgrof@kernel.org> Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org> Cc: Michal Koutny <mkoutny@suse.com> Cc: Roman Gushchin <roman.gushchin@linux.dev> Cc: Ryan Roberts <ryan.roberts@arm.com> Cc: Yang Shi <shy828301@gmail.com> Cc: Yu Zhao <yuzhao@google.com> Cc: Zach O'Keefe <zokeefe@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
79 lines | 2.4 KiB | C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef __LINUX_PAGE_OWNER_H
|
|
#define __LINUX_PAGE_OWNER_H
|
|
|
|
#include <linux/jump_label.h>
|
|
|
|
#ifdef CONFIG_PAGE_OWNER
extern struct static_key_false page_owner_inited;
extern struct page_ext_operations page_owner_ops;

extern void __reset_page_owner(struct page *page, unsigned short order);
extern void __set_page_owner(struct page *page,
			unsigned short order, gfp_t gfp_mask);
extern void __split_page_owner(struct page *page, int old_order,
			       int new_order);
extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone);
|
|
|
|
static inline void reset_page_owner(struct page *page, unsigned short order)
|
|
{
|
|
if (static_branch_unlikely(&page_owner_inited))
|
|
__reset_page_owner(page, order);
|
|
}
|
|
|
|
static inline void set_page_owner(struct page *page,
				  unsigned short order, gfp_t gfp_mask)
{
	/* Record allocation info only when page_owner tracking is active. */
	if (!static_branch_unlikely(&page_owner_inited))
		return;

	__set_page_owner(page, order, gfp_mask);
}
|
|
|
|
static inline void split_page_owner(struct page *page, int old_order,
|
|
int new_order)
|
|
{
|
|
if (static_branch_unlikely(&page_owner_inited))
|
|
__split_page_owner(page, old_order, new_order);
|
|
}
|
|
static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
|
|
{
|
|
if (static_branch_unlikely(&page_owner_inited))
|
|
__folio_copy_owner(newfolio, old);
|
|
}
|
|
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
|
|
{
|
|
if (static_branch_unlikely(&page_owner_inited))
|
|
__set_page_owner_migrate_reason(page, reason);
|
|
}
|
|
static inline void dump_page_owner(const struct page *page)
|
|
{
|
|
if (static_branch_unlikely(&page_owner_inited))
|
|
__dump_page_owner(page);
|
|
}
|
|
#else
|
|
/* Stub: CONFIG_PAGE_OWNER disabled, all hooks compile away to nothing. */
static inline void reset_page_owner(struct page *page, unsigned short order)
{
}
|
|
static inline void set_page_owner(struct page *page,
				  unsigned short order, gfp_t gfp_mask)
{
}
|
|
static inline void split_page_owner(struct page *page, int old_order,
				    int new_order)
{
}
|
|
static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
{
}
|
|
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
{
}
|
|
static inline void dump_page_owner(const struct page *page)
{
}
|
|
#endif /* CONFIG_PAGE_OWNER */
|
|
#endif /* __LINUX_PAGE_OWNER_H */
|