// SPDX-License-Identifier: GPL-2.0-only
/* Page fragment allocator
 *
 * Page Fragment:
 * An arbitrary-length arbitrary-offset area of memory which resides within a
 * 0 or higher order page. Multiple fragments within that page are
 * individually refcounted, in the page's reference counter.
 *
 * The page_frag functions provide a simple allocation framework for page
 * fragments. This is used by the network stack and network device drivers to
 * provide a backing region of memory for use as either an sk_buff->head, or to
 * be used in the "frags" portion of skb_shared_info.
 */

#include <linux/export.h>
#include <linux/gfp_types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/page_frag_cache.h>
#include "internal.h"
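
/*
 * Illustrative usage sketch, not compiled here: a typical consumer keeps a
 * zero-initialised struct page_frag_cache and carves small buffers from it.
 * page_frag_alloc() is assumed to be the usual wrapper declared in
 * linux/page_frag_cache.h; the drain/free helpers are defined below.
 *
 *      struct page_frag_cache nc = {};   // .va == NULL, first alloc refills
 *      void *buf;
 *
 *      buf = page_frag_alloc(&nc, 256, GFP_ATOMIC);   // 256-byte fragment
 *      if (buf)
 *              page_frag_free(buf);      // drop the fragment's page reference
 *
 *      page_frag_cache_drain(&nc);       // drop the cache's remaining references
 */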
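
/*
 * Refill the cache with a fresh page: when PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE,
 * try a PAGE_FRAG_CACHE_MAX_ORDER compound page first, then fall back to a
 * single order-0 page, and record the new mapping in nc->va.
 */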
static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
                                             gfp_t gfp_mask)
{
        struct page *page = NULL;
        gfp_t gfp = gfp_mask;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
        gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP |
                   __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
        page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
                                PAGE_FRAG_CACHE_MAX_ORDER);
        nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
#endif
        if (unlikely(!page))
                page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);

        nc->va = page ? page_address(page) : NULL;

        return page;
}
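
/*
 * Drop the cache's remaining references on its current page (if any) and
 * clear nc->va so that the next allocation triggers a refill.
 */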
void page_frag_cache_drain(struct page_frag_cache *nc)
{
        if (!nc->va)
                return;

        __page_frag_cache_drain(virt_to_head_page(nc->va), nc->pagecnt_bias);
        nc->va = NULL;
}
EXPORT_SYMBOL(page_frag_cache_drain);
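
/*
 * Drop @count references from @page and return it to the page allocator once
 * its reference count reaches zero.
 */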
void __page_frag_cache_drain(struct page *page, unsigned int count)
{
        VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);

        if (page_ref_sub_and_test(page, count))
                free_unref_page(page, compound_order(page));
}
EXPORT_SYMBOL(__page_frag_cache_drain);
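
/*
 * Carve a @fragsz fragment out of the cached page at an offset aligned per
 * @align_mask: pass ~(align - 1) (i.e. -align) for a power-of-two alignment,
 * or ~0u for byte alignment. Returns a pointer into the cached page, or NULL
 * when no page can be refilled or when a fragment larger than PAGE_SIZE
 * cannot be satisfied from the current page.
 */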
void *__page_frag_alloc_align(struct page_frag_cache *nc,
                              unsigned int fragsz, gfp_t gfp_mask,
                              unsigned int align_mask)
{
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
        unsigned int size = nc->size;
#else
        unsigned int size = PAGE_SIZE;
#endif
        unsigned int offset;
        struct page *page;

        if (unlikely(!nc->va)) {
refill:
                page = __page_frag_cache_refill(nc, gfp_mask);
                if (!page)
                        return NULL;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
                /* if size can vary use size else just use PAGE_SIZE */
                size = nc->size;
#endif
                /* Even if we own the page, we do not use atomic_set().
                 * This would break get_page_unless_zero() users.
                 */
                page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);

                /* reset page count bias and offset to start of new frag */
                nc->pfmemalloc = page_is_pfmemalloc(page);
                nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
                nc->offset = 0;
        }

        offset = __ALIGN_KERNEL_MASK(nc->offset, ~align_mask);
        if (unlikely(offset + fragsz > size)) {
                if (unlikely(fragsz > PAGE_SIZE)) {
                        /*
                         * The caller is trying to allocate a fragment
                         * with fragsz > PAGE_SIZE, but the cache isn't big
                         * enough to satisfy the request; this may happen
                         * in low memory conditions. We don't release the
                         * cache page because that could make memory
                         * pressure worse, so we simply return NULL here.
                         */
                        return NULL;
                }

                page = virt_to_page(nc->va);

                if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
                        goto refill;

                if (unlikely(nc->pfmemalloc)) {
                        free_unref_page(page, compound_order(page));
                        goto refill;
                }

                /* OK, page count is 0, we can safely set it */
                set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);

                /* reset page count bias and offset to start of new frag */
                nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
                offset = 0;
        }

        nc->pagecnt_bias--;
        nc->offset = offset + fragsz;

        return nc->va + offset;
}
EXPORT_SYMBOL(__page_frag_alloc_align);
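
/*
 * Caller-side sketch of the alignment contract above, assuming the usual
 * page_frag_alloc_align() wrapper from linux/page_frag_cache.h, which negates
 * a power-of-two alignment before calling __page_frag_alloc_align():
 *
 *      // 64-byte aligned 128-byte fragment; align_mask ends up being -64
 *      void *p = page_frag_alloc_align(&nc, 128, GFP_ATOMIC, 64);
 */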

/*
 * Frees a page fragment allocated out of either a compound or order 0 page.
 */
void page_frag_free(void *addr)
{
        struct page *page = virt_to_head_page(addr);

        if (unlikely(put_page_testzero(page)))
                free_unref_page(page, compound_order(page));
}
EXPORT_SYMBOL(page_frag_free);