Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2024-12-29 09:12:07 +00:00)
mm/page_alloc: export free_frozen_pages() instead of free_unref_page()
We already have the concept of "frozen pages" (eg page_ref_freeze()), so
let's not complicate things by also having the concept of "unref pages".

Link: https://lkml.kernel.org/r/20241125210149.2976098-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:

parent 778cc14761
commit 2a5fb4098d
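For context: a page is "frozen" when its reference count is zero, so nobody else can take a new reference and the owner may free it outright. A minimal sketch of the idea, assuming the caller holds the only reference; page_ref_freeze() is the existing API the commit message cites, while free_frozen_pages() is MM-internal (see the mm/internal.h hunk below), so this is illustrative rather than module code:

	#include <linux/mm.h>
	#include <linux/page_ref.h>

	/* Illustrative only: free a page we believe we solely own. */
	static void free_if_sole_owner(struct page *page, unsigned int order)
	{
		/* Atomically drops the refcount 1 -> 0 iff it was exactly 1. */
		if (page_ref_freeze(page, 1))
			free_frozen_pages(page, order);	/* page is now frozen */
	}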
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -741,7 +741,7 @@ extern bool free_pages_prepare(struct page *page, unsigned int order);
 
 extern int user_min_free_kbytes;
 
-void free_unref_page(struct page *page, unsigned int order);
+void free_frozen_pages(struct page *page, unsigned int order);
 void free_unref_folios(struct folio_batch *fbatch);
 
 extern void zone_pcp_reset(struct zone *zone);
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2592,9 +2592,9 @@ static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
 	return high;
 }
 
-static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
-				   struct page *page, int migratetype,
-				   unsigned int order)
+static void free_frozen_page_commit(struct zone *zone,
+		struct per_cpu_pages *pcp, struct page *page, int migratetype,
+		unsigned int order)
 {
 	int high, batch;
 	int pindex;
@@ -2643,7 +2643,7 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
 /*
  * Free a pcp page
  */
-void free_unref_page(struct page *page, unsigned int order)
+void free_frozen_pages(struct page *page, unsigned int order)
 {
 	unsigned long __maybe_unused UP_flags;
 	struct per_cpu_pages *pcp;
@@ -2679,7 +2679,7 @@ void free_unref_page(struct page *page, unsigned int order)
 	pcp_trylock_prepare(UP_flags);
 	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 	if (pcp) {
-		free_unref_page_commit(zone, pcp, page, migratetype, order);
+		free_frozen_page_commit(zone, pcp, page, migratetype, order);
 		pcp_spin_unlock(pcp);
 	} else {
 		free_one_page(zone, page, pfn, order, FPI_NONE);
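The trylock here is deliberate: the per-CPU lists are guarded by a lock that cannot safely be spun on from every calling context (see the comments around pcp_spin_trylock() in mm/page_alloc.c), so on contention the page goes straight to the buddy allocator. A reduced sketch of the idiom, with names taken from the hunk above and the surrounding setup elided:

	/* Reduced sketch of the trylock-or-fallback idiom above. */
	pcp_trylock_prepare(UP_flags);
	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
	if (pcp) {
		/* Fast path: queue the page on the per-CPU free list. */
		free_frozen_page_commit(zone, pcp, page, migratetype, order);
		pcp_spin_unlock(pcp);
	} else {
		/* Contended: hand the page straight to the buddy allocator. */
		free_one_page(zone, page, pfn, order, FPI_NONE);
	}
	pcp_trylock_finish(UP_flags);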
@@ -2743,7 +2743,7 @@ void free_unref_folios(struct folio_batch *folios)
 
 		/*
 		 * Free isolated pages directly to the
-		 * allocator, see comment in free_unref_page.
+		 * allocator, see comment in free_frozen_pages.
 		 */
 		if (is_migrate_isolate(migratetype)) {
 			free_one_page(zone, &folio->page, pfn,
@@ -2774,7 +2774,7 @@ void free_unref_folios(struct folio_batch *folios)
 			migratetype = MIGRATE_MOVABLE;
 
 		trace_mm_page_free_batched(&folio->page);
-		free_unref_page_commit(zone, pcp, &folio->page, migratetype,
+		free_frozen_page_commit(zone, pcp, &folio->page, migratetype,
 				       order);
 	}
 
@@ -4837,11 +4837,11 @@ void __free_pages(struct page *page, unsigned int order)
 	struct alloc_tag *tag = pgalloc_tag_get(page);
 
 	if (put_page_testzero(page))
-		free_unref_page(page, order);
+		free_frozen_pages(page, order);
 	else if (!head) {
 		pgalloc_tag_sub_pages(tag, (1 << order) - 1);
 		while (order-- > 0)
-			free_unref_page(page + (1 << order), order);
+			free_frozen_pages(page + (1 << order), order);
 	}
 }
 EXPORT_SYMBOL(__free_pages);
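The tail-freeing loop deserves a gloss: when someone else still pins the head page, each iteration frees the upper half of the remaining block, leaving only page 0 behind until its last reference is put. A standalone illustration of the offsets (hypothetical plain-C demo, not kernel code):

	#include <stdio.h>

	/*
	 * Offsets touched by:
	 *	while (order-- > 0)
	 *		free_frozen_pages(page + (1 << order), order);
	 */
	int main(void)
	{
		unsigned int order = 3;	/* 8-page block, head page still referenced */

		while (order-- > 0)
			printf("free order-%u half at page offset %u\n",
			       order, 1u << order);
		/* Prints offsets 4, 2, 1; page 0 stays allocated for now. */
		return 0;
	}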
--- a/mm/page_frag_cache.c
+++ b/mm/page_frag_cache.c
@@ -86,7 +86,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count)
 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
 	if (page_ref_sub_and_test(page, count))
-		free_unref_page(page, compound_order(page));
+		free_frozen_pages(page, compound_order(page));
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
@@ -138,7 +138,7 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
 			goto refill;
 
 		if (unlikely(encoded_page_decode_pfmemalloc(encoded_page))) {
-			free_unref_page(page,
+			free_frozen_pages(page,
 					encoded_page_decode_order(encoded_page));
 			goto refill;
 		}
@@ -166,6 +166,6 @@ void page_frag_free(void *addr)
 	struct page *page = virt_to_head_page(addr);
 
 	if (unlikely(put_page_testzero(page)))
-		free_unref_page(page, compound_order(page));
+		free_frozen_pages(page, compound_order(page));
 }
 EXPORT_SYMBOL(page_frag_free);
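All three page_frag paths above free the backing page only once its refcount reaches zero, i.e. once it is frozen. A hedged usage sketch of the public API these internals serve; page_frag_alloc() and page_frag_free() are real, while the cache and sizes here are made up for illustration:

	#include <linux/gfp.h>
	#include <linux/page_frag_cache.h>

	static struct page_frag_cache example_cache;	/* zero-init is fine */

	static void frag_roundtrip(void)
	{
		void *buf = page_frag_alloc(&example_cache, 512, GFP_KERNEL);

		if (!buf)
			return;
		/* ... use the 512-byte fragment ... */
		page_frag_free(buf);	/* backing page is freed once the
					 * last fragment reference drops */
	}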
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -109,7 +109,7 @@ void __folio_put(struct folio *folio)
 	page_cache_release(folio);
 	folio_unqueue_deferred_split(folio);
 	mem_cgroup_uncharge(folio);
-	free_unref_page(&folio->page, folio_order(folio));
+	free_frozen_pages(&folio->page, folio_order(folio));
 }
 EXPORT_SYMBOL(__folio_put);
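__folio_put() runs only after the folio's refcount has already dropped to zero, which is precisely the "frozen" state the new name describes. The caller side, simplified from include/linux/mm.h:

	static inline void folio_put(struct folio *folio)
	{
		if (folio_put_testzero(folio))	/* ref hit zero: folio is frozen */
			__folio_put(folio);
	}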