mm: add is_huge_zero_folio()
This is the folio equivalent of is_huge_zero_page().  It doesn't add any
efficiency, but it does prevent the caller from passing a tail page and
getting confused when the predicate returns false.

Link: https://lkml.kernel.org/r/20240326202833.523759-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4d30eac374
commit 5beaee54a3
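The message above is about type safety rather than speed. As a rough sketch
(not part of this commit; the helper names check_page_old() and
check_page_new() are invented here for illustration), a caller that starts
from a struct page sees the difference like this:

/* Hypothetical callers, for illustration only, not from this commit. */

/*
 * Old style: compares the page pointer against huge_zero_page directly,
 * so a tail page of the huge zero folio makes this return false even
 * though the memory belongs to the huge zero folio.
 */
static bool check_page_old(struct page *page)
{
        return is_huge_zero_page(page);
}

/*
 * New style: resolve to the folio first.  A folio can never refer to a
 * tail page, so the predicate is asked about the whole folio.
 */
static bool check_page_new(struct page *page)
{
        return is_huge_zero_folio(page_folio(page));
}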
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -155,7 +155,7 @@ u64 stable_page_flags(const struct page *page)
         else if (folio_test_large(folio)) {
                 if ((k & (1 << PG_lru)) || is_anon)
                         u |= 1 << KPF_THP;
-                else if (is_huge_zero_page(&folio->page)) {
+                else if (is_huge_zero_folio(folio)) {
                         u |= 1 << KPF_ZERO_PAGE;
                         u |= 1 << KPF_THP;
                 }
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -356,6 +356,11 @@ static inline bool is_huge_zero_page(const struct page *page)
         return READ_ONCE(huge_zero_page) == page;
 }
 
+static inline bool is_huge_zero_folio(const struct folio *folio)
+{
+        return READ_ONCE(huge_zero_page) == &folio->page;
+}
+
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
         return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
@@ -485,6 +490,11 @@ static inline bool is_huge_zero_page(const struct page *page)
         return false;
 }
 
+static inline bool is_huge_zero_folio(const struct folio *folio)
+{
+        return false;
+}
+
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
         return false;
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -789,12 +789,12 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
 }
 #endif
 
-static inline bool is_transparent_hugepage(struct folio *folio)
+static inline bool is_transparent_hugepage(const struct folio *folio)
 {
         if (!folio_test_large(folio))
                 return false;
 
-        return is_huge_zero_page(&folio->page) ||
+        return is_huge_zero_folio(folio) ||
                folio_test_large_rmappable(folio);
 }
 
@@ -3085,7 +3085,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
         }
 
-        is_hzp = is_huge_zero_page(&folio->page);
+        is_hzp = is_huge_zero_folio(folio);
         if (is_hzp) {
                 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
                 return -EBUSY;
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -510,7 +510,7 @@ static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
                 return;
         }
         folio = pfn_folio(pmd_pfn(*pmd));
-        if (is_huge_zero_page(&folio->page)) {
+        if (is_huge_zero_folio(folio)) {
                 walk->action = ACTION_CONTINUE;
                 return;
         }
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -985,7 +985,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
                 struct folio *folio = folios->folios[i];
                 unsigned int nr_refs = refs ? refs[i] : 1;
 
-                if (is_huge_zero_page(&folio->page))
+                if (is_huge_zero_folio(folio))
                         continue;
 
                 if (folio_is_zone_device(folio)) {
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -301,7 +301,7 @@ void free_page_and_swap_cache(struct page *page)
         struct folio *folio = page_folio(page);
 
         free_swap_cache(folio);
-        if (!is_huge_zero_page(page))
+        if (!is_huge_zero_folio(folio))
                 folio_put(folio);
 }
 
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1664,7 +1664,7 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
                     !pmd_none(dst_pmdval)) {
                         struct folio *folio = pfn_folio(pmd_pfn(*src_pmd));
 
-                        if (!folio || (!is_huge_zero_page(&folio->page) &&
+                        if (!folio || (!is_huge_zero_folio(folio) &&
                              !PageAnonExclusive(&folio->page))) {
                                 spin_unlock(ptl);
                                 err = -EBUSY;