mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-07 14:32:23 +00:00
mm: memory_hotplug: unify Huge/LRU/non-LRU movable folio isolation
Use isolate_folio_to_list() to unify hugetlb/LRU/non-LRU folio isolation, which cleans up the code a bit and saves a few calls to compound_head(). [wangkefeng.wang@huawei.com: various fixes] Link: https://lkml.kernel.org/r/20240829150500.2599549-1-wangkefeng.wang@huawei.com Link: https://lkml.kernel.org/r/20240827114728.3212578-6-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com> Reviewed-by: Miaohe Lin <linmiaohe@huawei.com> Cc: Dan Carpenter <dan.carpenter@linaro.org> Cc: David Hildenbrand <david@redhat.com> Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com> Cc: Naoya Horiguchi <nao.horiguchi@gmail.com> Cc: Oscar Salvador <osalvador@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
f1264e9531
commit
6f1833b820
@ -1772,15 +1772,14 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
|
|||||||
|
|
||||||
static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
|
static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
|
||||||
{
|
{
|
||||||
|
struct folio *folio;
|
||||||
unsigned long pfn;
|
unsigned long pfn;
|
||||||
struct page *page;
|
|
||||||
LIST_HEAD(source);
|
LIST_HEAD(source);
|
||||||
static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
|
static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
|
||||||
DEFAULT_RATELIMIT_BURST);
|
DEFAULT_RATELIMIT_BURST);
|
||||||
|
|
||||||
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
|
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
|
||||||
struct folio *folio;
|
struct page *page;
|
||||||
bool isolated;
|
|
||||||
|
|
||||||
if (!pfn_valid(pfn))
|
if (!pfn_valid(pfn))
|
||||||
continue;
|
continue;
|
||||||
@ -1811,34 +1810,21 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (folio_test_hugetlb(folio)) {
|
if (!folio_try_get(folio))
|
||||||
isolate_hugetlb(folio, &source);
|
|
||||||
continue;
|
continue;
|
||||||
}
|
|
||||||
|
|
||||||
if (!get_page_unless_zero(page))
|
if (unlikely(page_folio(page) != folio))
|
||||||
continue;
|
goto put_folio;
|
||||||
/*
|
|
||||||
* We can skip free pages. And we can deal with pages on
|
|
||||||
* LRU and non-lru movable pages.
|
|
||||||
*/
|
|
||||||
if (PageLRU(page))
|
|
||||||
isolated = isolate_lru_page(page);
|
|
||||||
else
|
|
||||||
isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
|
|
||||||
if (isolated) {
|
|
||||||
list_add_tail(&page->lru, &source);
|
|
||||||
if (!__PageMovable(page))
|
|
||||||
inc_node_page_state(page, NR_ISOLATED_ANON +
|
|
||||||
page_is_file_lru(page));
|
|
||||||
|
|
||||||
} else {
|
if (!isolate_folio_to_list(folio, &source)) {
|
||||||
if (__ratelimit(&migrate_rs)) {
|
if (__ratelimit(&migrate_rs)) {
|
||||||
pr_warn("failed to isolate pfn %lx\n", pfn);
|
pr_warn("failed to isolate pfn %lx\n",
|
||||||
|
page_to_pfn(page));
|
||||||
dump_page(page, "isolation failed");
|
dump_page(page, "isolation failed");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
put_page(page);
|
put_folio:
|
||||||
|
folio_put(folio);
|
||||||
}
|
}
|
||||||
if (!list_empty(&source)) {
|
if (!list_empty(&source)) {
|
||||||
nodemask_t nmask = node_states[N_MEMORY];
|
nodemask_t nmask = node_states[N_MEMORY];
|
||||||
@ -1853,7 +1839,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
|
|||||||
* We have checked that migration range is on a single zone so
|
* We have checked that migration range is on a single zone so
|
||||||
* we can use the nid of the first page to all the others.
|
* we can use the nid of the first page to all the others.
|
||||||
*/
|
*/
|
||||||
mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
|
mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* try to allocate from a different node but reuse this node
|
* try to allocate from a different node but reuse this node
|
||||||
@ -1866,11 +1852,12 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
|
|||||||
ret = migrate_pages(&source, alloc_migration_target, NULL,
|
ret = migrate_pages(&source, alloc_migration_target, NULL,
|
||||||
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
|
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
list_for_each_entry(page, &source, lru) {
|
list_for_each_entry(folio, &source, lru) {
|
||||||
if (__ratelimit(&migrate_rs)) {
|
if (__ratelimit(&migrate_rs)) {
|
||||||
pr_warn("migrating pfn %lx failed ret:%d\n",
|
pr_warn("migrating pfn %lx failed ret:%d\n",
|
||||||
page_to_pfn(page), ret);
|
folio_pfn(folio), ret);
|
||||||
dump_page(page, "migration failure");
|
dump_page(&folio->page,
|
||||||
|
"migration failure");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
putback_movable_pages(&source);
|
putback_movable_pages(&source);
|
||||||
|
Loading…
Reference in New Issue
Block a user