Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm: percpu: enable per-cpu allocation tagging
Redefine __alloc_percpu, __alloc_percpu_gfp and __alloc_reserved_percpu
to record allocations and deallocations done by these functions.

[surenb@google.com: undo _noprof additions in the documentation]
Link: https://lkml.kernel.org/r/20240326231453.1206227-6-surenb@google.com
Link: https://lkml.kernel.org/r/20240321163705.3067592-30-surenb@google.com
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 60fa4a9e23
commit 24e44cc22a
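The mechanism is visible in the include/linux/percpu.h hunks below: the three entry points stop being exported functions and become macros that wrap pcpu_alloc_noprof() in alloc_hooks(), so every call site carries its own allocation tag. The snippet that follows is only a simplified sketch of that wrapping pattern, not the real alloc_hooks()/DEFINE_ALLOC_TAG definitions from <linux/alloc_tag.h> (those register the tag in a dedicated section and are gated by CONFIG_MEM_ALLOC_PROFILING); the macro name alloc_hooks_sketch and the save/restore through current->alloc_tag are illustrative assumptions.

/*
 * Simplified sketch of the per-call-site tagging pattern (illustrative
 * only; see <linux/alloc_tag.h> for the real alloc_hooks()).
 */
#define alloc_hooks_sketch(_do_alloc)					\
({									\
	/* one static tag per macro expansion, i.e. per call site */	\
	static struct alloc_tag _tag;					\
	struct alloc_tag *_prev = current->alloc_tag;			\
									\
	current->alloc_tag = &_tag;	/* attribute what follows */	\
	typeof(_do_alloc) _res = _do_alloc; /* e.g. pcpu_alloc_noprof() */ \
	current->alloc_tag = _prev;	/* restore the outer context */	\
	_res;								\
})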
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -2,6 +2,7 @@
 #ifndef __LINUX_PERCPU_H
 #define __LINUX_PERCPU_H
 
+#include <linux/alloc_tag.h>
 #include <linux/mmdebug.h>
 #include <linux/preempt.h>
 #include <linux/smp.h>
@@ -9,6 +10,7 @@
 #include <linux/pfn.h>
 #include <linux/init.h>
 #include <linux/cleanup.h>
+#include <linux/sched.h>
 
 #include <asm/percpu.h>
 
@@ -125,7 +127,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 					pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
 #endif
 
-extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1);
 extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
 extern bool is_kernel_percpu_address(unsigned long addr);
 
@@ -133,14 +134,16 @@ extern bool is_kernel_percpu_address(unsigned long addr);
 extern void __init setup_per_cpu_areas(void);
 #endif
 
-extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
-extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
-extern void free_percpu(void __percpu *__pdata);
+extern void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
+					gfp_t gfp) __alloc_size(1);
 extern size_t pcpu_alloc_size(void __percpu *__pdata);
 
-DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
-
-extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+#define __alloc_percpu_gfp(_size, _align, _gfp)				\
+	alloc_hooks(pcpu_alloc_noprof(_size, _align, false, _gfp))
+#define __alloc_percpu(_size, _align)					\
+	alloc_hooks(pcpu_alloc_noprof(_size, _align, false, GFP_KERNEL))
+#define __alloc_reserved_percpu(_size, _align)				\
+	alloc_hooks(pcpu_alloc_noprof(_size, _align, true, GFP_KERNEL))
 
 #define alloc_percpu_gfp(type, gfp)					\
 	(typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type),	\
@@ -149,6 +152,12 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 	(typeof(type) __percpu *)__alloc_percpu(sizeof(type),		\
 						__alignof__(type))
 
+extern void free_percpu(void __percpu *__pdata);
+
+DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
+
+extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+
 extern unsigned long pcpu_nr_pages(void);
 
 #endif /* __LINUX_PERCPU_H */
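Caller-side code is unaffected by the header change above: alloc_percpu(), alloc_percpu_gfp(), __alloc_percpu() and friends keep their signatures, they merely expand through alloc_hooks() now. A hypothetical caller (the names my_counter, my_stats, my_stats_init and my_stats_exit are made up for illustration, not part of this patch):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* Hypothetical per-CPU statistics object, invented for this example. */
struct my_counter {
	u64 packets;
	u64 bytes;
};

static struct my_counter __percpu *my_stats;

static int __init my_stats_init(void)
{
	/*
	 * alloc_percpu() expands to alloc_hooks(pcpu_alloc_noprof(...)),
	 * so the memory is attributed to this call site in the profile.
	 */
	my_stats = alloc_percpu(struct my_counter);
	if (!my_stats)
		return -ENOMEM;
	return 0;
}

static void my_stats_exit(void)
{
	/* the matching free is subtracted from the same call-site tag */
	free_percpu(my_stats);
}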
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1740,7 +1740,7 @@ static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t s
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
+void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
 				 gfp_t gfp)
 {
 	gfp_t pcpu_gfp;
@@ -1907,6 +1907,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
 
+	pcpu_alloc_tag_alloc_hook(chunk, off, size);
+
 	return ptr;
 
 fail_unlock:
@@ -1935,61 +1937,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 	return NULL;
 }
-
-/**
- * __alloc_percpu_gfp - allocate dynamic percpu area
- * @size: size of area to allocate in bytes
- * @align: alignment of area (max PAGE_SIZE)
- * @gfp: allocation flags
- *
- * Allocate zero-filled percpu area of @size bytes aligned at @align. If
- * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
- * be called from any context but is a lot more likely to fail. If @gfp
- * has __GFP_NOWARN then no warning will be triggered on invalid or failed
- * allocation requests.
- *
- * RETURNS:
- * Percpu pointer to the allocated area on success, NULL on failure.
- */
-void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
-{
-	return pcpu_alloc(size, align, false, gfp);
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
-
-/**
- * __alloc_percpu - allocate dynamic percpu area
- * @size: size of area to allocate in bytes
- * @align: alignment of area (max PAGE_SIZE)
- *
- * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
- */
-void __percpu *__alloc_percpu(size_t size, size_t align)
-{
-	return pcpu_alloc(size, align, false, GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu);
-
-/**
- * __alloc_reserved_percpu - allocate reserved percpu area
- * @size: size of area to allocate in bytes
- * @align: alignment of area (max PAGE_SIZE)
- *
- * Allocate zero-filled percpu area of @size bytes aligned at @align
- * from reserved percpu area if arch has set it up; otherwise,
- * allocation is served from the same dynamic area. Might sleep.
- * Might trigger writeouts.
- *
- * CONTEXT:
- * Does GFP_KERNEL allocation.
- *
- * RETURNS:
- * Percpu pointer to the allocated area on success, NULL on failure.
- */
-void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
-{
-	return pcpu_alloc(size, align, true, GFP_KERNEL);
-}
+EXPORT_SYMBOL_GPL(pcpu_alloc_noprof);
 
 /**
  * pcpu_balance_free - manage the amount of free chunks
@@ -2328,6 +2276,8 @@ void free_percpu(void __percpu *ptr)
 	spin_lock_irqsave(&pcpu_lock, flags);
 	size = pcpu_free_area(chunk, off);
 
+	pcpu_alloc_tag_free_hook(chunk, off, size);
+
 	pcpu_memcg_free_hook(chunk, off, size);
 
 	/*
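The bodies of pcpu_alloc_tag_alloc_hook() and pcpu_alloc_tag_free_hook() called in the hunks above are not part of this excerpt (the diff view is truncated here). Conceptually they charge and uncharge the allocated size against the tag of the call site recorded at allocation time. A rough sketch under stated assumptions: mem_alloc_profiling_enabled(), alloc_tag_add() and alloc_tag_sub() are taken to be the <linux/alloc_tag.h> helpers, and pcpu_tag_ref() is a hypothetical lookup of the per-chunk tag reference for the allocation at @off.

/* Illustrative sketch only -- not the hook bodies from this patch. */
static void pcpu_alloc_tag_alloc_hook(struct pcpu_chunk *chunk, int off,
				      size_t size)
{
	if (mem_alloc_profiling_enabled())
		/* charge @size bytes to the tag of the allocating call site */
		alloc_tag_add(pcpu_tag_ref(chunk, off), current->alloc_tag,
			      size);
}

static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off,
				     size_t size)
{
	if (mem_alloc_profiling_enabled())
		/* release the charge recorded when the area was allocated */
		alloc_tag_sub(pcpu_tag_ref(chunk, off), size);
}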