mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
hugetlb: add hstate_is_gigantic()
Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Reviewed-by: Davidlohr Bueso <davidlohr@hp.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit bae7f4ae14
parent 2906dd5283
include/linux/hugetlb.h
@@ -343,6 +343,11 @@ static inline unsigned huge_page_shift(struct hstate *h)
 	return h->order + PAGE_SHIFT;
 }
 
+static inline bool hstate_is_gigantic(struct hstate *h)
+{
+	return huge_page_order(h) >= MAX_ORDER;
+}
+
 static inline unsigned int pages_per_huge_page(struct hstate *h)
 {
 	return 1 << h->order;
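The new helper names an existing convention: a huge page whose order is at or above MAX_ORDER is too large to come from the buddy allocator, which is exactly the condition the rest of the patch keeps testing for. Below is a minimal standalone C sketch of that predicate, not kernel code; the PAGE_SHIFT and MAX_ORDER values and the trimmed-down struct hstate are assumptions chosen only for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel definitions (assumed values). */
#define PAGE_SHIFT 12          /* 4 KiB base pages */
#define MAX_ORDER  11          /* first order the buddy allocator cannot serve */

struct hstate {
        unsigned int order;     /* huge page size == PAGE_SIZE << order */
};

static unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

/* Same predicate the patch introduces: too big for the buddy allocator. */
static bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

int main(void)
{
        struct hstate hstate_2mb = { .order = 21 - PAGE_SHIFT };  /* 2 MiB, order 9  */
        struct hstate hstate_1gb = { .order = 30 - PAGE_SHIFT };  /* 1 GiB, order 18 */

        printf("2 MiB gigantic? %d\n", hstate_is_gigantic(&hstate_2mb)); /* prints 0 */
        printf("1 GiB gigantic? %d\n", hstate_is_gigantic(&hstate_1gb)); /* prints 1 */
        return 0;
}

With these assumed values, a 2 MiB hstate (order 9) is not gigantic while a 1 GiB hstate (order 18) is, which matches how the helper is used in mm/hugetlb.c below.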
mm/hugetlb.c (28 changed lines)
@@ -611,7 +611,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
 {
 	int i;
 
-	VM_BUG_ON(h->order >= MAX_ORDER);
+	VM_BUG_ON(hstate_is_gigantic(h));
 
 	h->nr_huge_pages--;
 	h->nr_huge_pages_node[page_to_nid(page)]--;
@@ -664,7 +664,7 @@ static void free_huge_page(struct page *page)
 	if (restore_reserve)
 		h->resv_huge_pages++;
 
-	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
+	if (h->surplus_huge_pages_node[nid] && !hstate_is_gigantic(h)) {
 		/* remove the page from active list */
 		list_del(&page->lru);
 		update_and_free_page(h, page);
@@ -768,7 +768,7 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 {
 	struct page *page;
 
-	if (h->order >= MAX_ORDER)
+	if (hstate_is_gigantic(h))
 		return NULL;
 
 	page = alloc_pages_exact_node(nid,
@@ -962,7 +962,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
 	struct page *page;
 	unsigned int r_nid;
 
-	if (h->order >= MAX_ORDER)
+	if (hstate_is_gigantic(h))
 		return NULL;
 
 	/*
@@ -1155,7 +1155,7 @@ static void return_unused_surplus_pages(struct hstate *h,
 	h->resv_huge_pages -= unused_resv_pages;
 
 	/* Cannot return gigantic pages currently */
-	if (h->order >= MAX_ORDER)
+	if (hstate_is_gigantic(h))
 		return;
 
 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
@@ -1355,7 +1355,7 @@ static void __init gather_bootmem_prealloc(void)
 		 * fix confusing memory reports from free(1) and another
 		 * side-effects, like CommitLimit going negative.
 		 */
-		if (h->order > (MAX_ORDER - 1))
+		if (hstate_is_gigantic(h))
 			adjust_managed_page_count(page, 1 << h->order);
 	}
 }
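(The old test in gather_bootmem_prealloc() was spelled h->order > (MAX_ORDER - 1), which is the same condition as h->order >= MAX_ORDER; since huge_page_order(h) simply returns h->order, replacing it with hstate_is_gigantic(h) keeps the behavior identical.)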
@@ -1365,7 +1365,7 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 	unsigned long i;
 
 	for (i = 0; i < h->max_huge_pages; ++i) {
-		if (h->order >= MAX_ORDER) {
+		if (hstate_is_gigantic(h)) {
 			if (!alloc_bootmem_huge_page(h))
 				break;
 		} else if (!alloc_fresh_huge_page(h,
@@ -1381,7 +1381,7 @@ static void __init hugetlb_init_hstates(void)
 
 	for_each_hstate(h) {
 		/* oversize hugepages were init'ed in early boot */
-		if (h->order < MAX_ORDER)
+		if (!hstate_is_gigantic(h))
 			hugetlb_hstate_alloc_pages(h);
 	}
 }
@@ -1415,7 +1415,7 @@ static void try_to_free_low(struct hstate *h, unsigned long count,
 {
 	int i;
 
-	if (h->order >= MAX_ORDER)
+	if (hstate_is_gigantic(h))
 		return;
 
 	for_each_node_mask(i, *nodes_allowed) {
@@ -1478,7 +1478,7 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
 {
 	unsigned long min_count, ret;
 
-	if (h->order >= MAX_ORDER)
+	if (hstate_is_gigantic(h))
 		return h->max_huge_pages;
 
 	/*
@@ -1605,7 +1605,7 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
 		goto out;
 
 	h = kobj_to_hstate(kobj, &nid);
-	if (h->order >= MAX_ORDER) {
+	if (hstate_is_gigantic(h)) {
 		err = -EINVAL;
 		goto out;
 	}
@@ -1688,7 +1688,7 @@ static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
 	unsigned long input;
 	struct hstate *h = kobj_to_hstate(kobj, NULL);
 
-	if (h->order >= MAX_ORDER)
+	if (hstate_is_gigantic(h))
 		return -EINVAL;
 
 	err = kstrtoul(buf, 10, &input);
@@ -2112,7 +2112,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 
 	tmp = h->max_huge_pages;
 
-	if (write && h->order >= MAX_ORDER)
+	if (write && hstate_is_gigantic(h))
 		return -EINVAL;
 
 	table->data = &tmp;
@@ -2168,7 +2168,7 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 
 	tmp = h->nr_overcommit_huge_pages;
 
-	if (write && h->order >= MAX_ORDER)
+	if (write && hstate_is_gigantic(h))
 		return -EINVAL;
 
 	table->data = &tmp;