Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
mm/hugetlb: convert alloc_surplus_huge_page() to folios
Change alloc_surplus_huge_page() to alloc_surplus_hugetlb_folio() and
update its callers.

Link: https://lkml.kernel.org/r/20230113223057.173292-5-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 3a740e8bb5
parent a36f1e9024
1 changed file: mm/hugetlb.c (27 lines changed)
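For context before reading the diff: this conversion follows the same shape as the rest of the series. The allocator itself now traffics in struct folio, and a caller that must still hand a struct page across an unconverted boundary converts on return with &folio->page. Below is a minimal userspace sketch of that shape; the types are simplified stand-ins (not the kernel definitions) and the demo_* names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins; in the kernel, struct folio's first member
 * overlays the head struct page. */
struct page { unsigned long flags; };
struct folio { struct page page; };

/* Folio-returning allocator, in the role of
 * alloc_surplus_hugetlb_folio(); returns NULL on failure. */
static struct folio *demo_alloc_folio(void)
{
	return calloc(1, sizeof(struct folio));
}

/* Page-returning boundary, in the role of
 * alloc_buddy_huge_page_with_mpol(): uses folios internally and
 * converts only on return. */
static struct page *demo_alloc_page(void)
{
	struct folio *folio = demo_alloc_folio();

	/* page sits at offset 0 of folio, so a NULL folio becomes a
	 * NULL page and existing error checks in callers still work. */
	return &folio->page;
}

int main(void)
{
	struct page *page = demo_alloc_page();

	printf("page at %p\n", (void *)page);
	free(page);	/* same address as the enclosing folio in this mock */
	return 0;
}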
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2378,8 +2378,8 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 /*
  * Allocates a fresh surplus page from the page allocator.
  */
-static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
-						int nid, nodemask_t *nmask)
+static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
+				gfp_t gfp_mask, int nid, nodemask_t *nmask)
 {
 	struct folio *folio = NULL;
 
@@ -2416,7 +2416,7 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
 out_unlock:
 	spin_unlock_irq(&hugetlb_lock);
 
-	return &folio->page;
+	return folio;
 }
 
 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
@@ -2449,7 +2449,7 @@ static
 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
 		struct vm_area_struct *vma, unsigned long addr)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	struct mempolicy *mpol;
 	gfp_t gfp_mask = htlb_alloc_mask(h);
 	int nid;
@@ -2460,16 +2460,16 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
 		gfp_t gfp = gfp_mask | __GFP_NOWARN;
 
 		gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
-		page = alloc_surplus_huge_page(h, gfp, nid, nodemask);
+		folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
 
 		/* Fallback to all nodes if page==NULL */
 		nodemask = NULL;
 	}
 
-	if (!page)
-		page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
+	if (!folio)
+		folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
 	mpol_cond_put(mpol);
-	return page;
+	return &folio->page;
 }
 
 /* page migration callback function */
@@ -2518,6 +2518,7 @@ static int gather_surplus_pages(struct hstate *h, long delta)
 	__must_hold(&hugetlb_lock)
 {
 	LIST_HEAD(surplus_list);
+	struct folio *folio;
 	struct page *page, *tmp;
 	int ret;
 	long i;
@@ -2537,13 +2538,13 @@ static int gather_surplus_pages(struct hstate *h, long delta)
 retry:
 	spin_unlock_irq(&hugetlb_lock);
 	for (i = 0; i < needed; i++) {
-		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
+		folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
 				NUMA_NO_NODE, NULL);
-		if (!page) {
+		if (!folio) {
 			alloc_ok = false;
 			break;
 		}
-		list_add(&page->lru, &surplus_list);
+		list_add(&folio->lru, &surplus_list);
 		cond_resched();
 	}
 	allocated += i;
@@ -3496,7 +3497,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 	 * First take pages out of surplus state. Then make up the
 	 * remaining difference by allocating fresh huge pages.
 	 *
-	 * We might race with alloc_surplus_huge_page() here and be unable
+	 * We might race with alloc_surplus_hugetlb_folio() here and be unable
 	 * to convert a surplus huge page to a normal huge page. That is
 	 * not critical, though, it just means the overall size of the
 	 * pool might be one hugepage larger than it needs to be, but
@@ -3539,7 +3540,7 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
 	 * By placing pages into the surplus state independent of the
 	 * overcommit value, we are allowing the surplus pool size to
 	 * exceed overcommit. There are few sane options here. Since
-	 * alloc_surplus_huge_page() is checking the global counter,
+	 * alloc_surplus_hugetlb_folio() is checking the global counter,
 	 * though, we'll note that we're not allowed to exceed surplus
 	 * and won't grow the pool anywhere else. Not until one of the
 	 * sysctls are changed, or the surplus pages go out of use.
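Two idioms in this diff deserve a note. gather_surplus_pages() links entries into surplus_list through folio->lru while the existing drain loop still walks the list as struct page, and alloc_buddy_huge_page_with_mpol() returns &folio->page even when folio may be NULL. Both depend on struct folio beginning with its embedded head page, with lru at matching offsets in the two views. Here is a standalone check of that layout assumption with simplified stand-in types; the real invariants are enforced by static asserts next to struct folio's definition in include/linux/mm_types.h.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Stand-in layouts only; the real structures carry many more fields. */
struct page {
	unsigned long flags;
	struct list_head lru;
};

struct folio {
	union {
		struct {
			unsigned long flags;
			struct list_head lru;
		};
		struct page page;
	};
};

/* The conversions in the diff rely on these layout facts. */
_Static_assert(offsetof(struct folio, page) == 0,
	       "a folio must begin with its head page");
_Static_assert(offsetof(struct folio, lru) == offsetof(struct page, lru),
	       "lru must line up in both views");

int main(void)
{
	struct folio f;

	/* Same storage seen through either type: an entry added via
	 * folio->lru can later be walked via page->lru. */
	printf("&f.lru == &f.page.lru? %s\n",
	       ((void *)&f.lru == (void *)&f.page.lru) ? "yes" : "no");
	return 0;
}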