mm: convert prep_transhuge_page() to folio_prep_large_rmappable()
Match folio_undo_large_rmappable(), and move the casting from page to
folio into the callers (which they were largely doing anyway).

Link: https://lkml.kernel.org/r/20230816151201.3655946-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent 8dc4a8f1e0
commit da6e7bf3a0
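Before the diff, here is a minimal userspace sketch of the pattern this commit applies, using stand-in stub types rather than the real kernel definitions: the callee previously accepted a struct page and cast to struct folio internally; after the rename it accepts the folio directly and each caller performs the cast, matching folio_undo_large_rmappable(). The printf bodies are placeholders for illustration only.

	/* Illustrative stub types only -- not the real kernel definitions. */
	#include <stdio.h>

	struct page { unsigned long flags; };
	struct folio { struct page page; };	/* a folio overlays its head page */

	/* Before: the callee takes a page and hides the cast inside. */
	static void prep_transhuge_page(struct page *page)
	{
		struct folio *folio = (struct folio *)page;
		printf("prepared folio at %p\n", (void *)folio);
	}

	/* After: the callee takes a folio; callers cast themselves. */
	static void folio_prep_large_rmappable(struct folio *folio)
	{
		printf("prepared folio at %p\n", (void *)folio);
	}

	int main(void)
	{
		struct page p = { .flags = 0 };

		prep_transhuge_page(&p);				/* old call site */
		folio_prep_large_rmappable((struct folio *)&p);	/* new call site */
		return 0;
	}

Pushing the cast to the call sites keeps the callee's signature honest about the type it operates on, and, as the commit message notes, most callers already had a folio in hand.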
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -140,7 +140,7 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
 unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags);
 
-void prep_transhuge_page(struct page *page);
+void folio_prep_large_rmappable(struct folio *folio);
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list(struct page *page, struct list_head *list);
 static inline int split_huge_page(struct page *page)
@@ -280,7 +280,7 @@ static inline bool hugepage_vma_check(struct vm_area_struct *vma,
 	return false;
 }
 
-static inline void prep_transhuge_page(struct page *page) {}
+static inline void folio_prep_large_rmappable(struct folio *folio) {}
 
 #define transparent_hugepage_flags 0UL
 
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -577,10 +577,8 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
 }
 #endif
 
-void prep_transhuge_page(struct page *page)
+void folio_prep_large_rmappable(struct folio *folio)
 {
-	struct folio *folio = (struct folio *)page;
-
 	VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio);
 	INIT_LIST_HEAD(&folio->_deferred_list);
 	folio_set_compound_dtor(folio, TRANSHUGE_PAGE_DTOR);
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -896,7 +896,7 @@ static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
 		return false;
 	}
 
-	prep_transhuge_page(*hpage);
+	folio_prep_large_rmappable((struct folio *)*hpage);
 	count_vm_event(THP_COLLAPSE_ALLOC);
 	return true;
 }
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2195,9 +2195,9 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		mpol_cond_put(pol);
 		gfp |= __GFP_COMP;
 		page = alloc_page_interleave(gfp, order, nid);
-		if (page && order > 1)
-			prep_transhuge_page(page);
 		folio = (struct folio *)page;
+		if (folio && order > 1)
+			folio_prep_large_rmappable(folio);
 		goto out;
 	}
 
@@ -2208,9 +2208,9 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
 		gfp |= __GFP_COMP;
 		page = alloc_pages_preferred_many(gfp, order, node, pol);
 		mpol_cond_put(pol);
-		if (page && order > 1)
-			prep_transhuge_page(page);
 		folio = (struct folio *)page;
+		if (folio && order > 1)
+			folio_prep_large_rmappable(folio);
 		goto out;
 	}
 
@@ -2306,10 +2306,11 @@ EXPORT_SYMBOL(alloc_pages);
 struct folio *folio_alloc(gfp_t gfp, unsigned order)
 {
 	struct page *page = alloc_pages(gfp | __GFP_COMP, order);
+	struct folio *folio = (struct folio *)page;
 
-	if (page && order > 1)
-		prep_transhuge_page(page);
-	return (struct folio *)page;
+	if (folio && order > 1)
+		folio_prep_large_rmappable(folio);
+	return folio;
 }
 EXPORT_SYMBOL(folio_alloc);
 
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4489,10 +4489,11 @@ struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
 {
 	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
 			preferred_nid, nodemask);
+	struct folio *folio = (struct folio *)page;
 
-	if (page && order > 1)
-		prep_transhuge_page(page);
-	return (struct folio *)page;
+	if (folio && order > 1)
+		folio_prep_large_rmappable(folio);
+	return folio;
 }
 EXPORT_SYMBOL(__folio_alloc);
 