2c321f3f70
The main goal of the memory allocation profiling patchset is to provide accounting that is cheap enough to run in production. To achieve that, we inject counters using codetags at the allocation call sites so that every allocation is accounted. This injection allows accounting to be done efficiently because the injected counters are immediately available, as opposed to alternative methods such as using _RET_IP_, which would require a counter lookup and the locking that goes with it, making accounting much more expensive. This method requires all allocation functions to inject separate counters at their call sites so that their callers can be accounted individually.

Counter injection is implemented by allocation hooks which should wrap all allocation functions. Inlined functions which perform allocations but do not use allocation hooks are directly charged for the allocations they perform. In most cases these functions are just specialized allocation wrappers used from multiple places to allocate objects of a specific type. It would be more useful to do the accounting at their call sites instead.

Instrument these helpers to do accounting at the call site. Simple inlined allocation wrappers are converted directly into macros. More complex allocators, or allocators with documentation, are converted into _noprof versions and allocation hooks are added. This allows the memory allocation profiling mechanism to charge allocations to the callers of these functions.

Link: https://lkml.kernel.org/r/20240415020731.1152108-1-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Jan Kara <jack@suse.cz> [jbd2]
Cc: Anna Schumaker <anna@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jakub Sitnicki <jakub@cloudflare.com>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
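As a rough sketch of the conversion pattern described above (the names here are hypothetical and not taken from the patch): a simple inline allocation wrapper that used to be charged for its own allocations becomes a _noprof variant, and a macro wrapping it in alloc_hooks() charges the wrapper's caller instead. The kvmemdup_bpfptr() change in the bpfptr.h file below follows this shape.

/* Before: the allocation is accounted to this wrapper's own code tag. */
static inline struct foo *foo_alloc(gfp_t gfp)
{
	return kmalloc(sizeof(struct foo), gfp);
}

/* After: the wrapper becomes a _noprof variant so it no longer charges itself... */
static inline struct foo *foo_alloc_noprof(gfp_t gfp)
{
	return kmalloc_noprof(sizeof(struct foo), gfp);
}
/* ...and alloc_hooks() at the macro boundary creates a code tag at each call site. */
#define foo_alloc(...)	alloc_hooks(foo_alloc_noprof(__VA_ARGS__))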
/* SPDX-License-Identifier: GPL-2.0-only */
/* A pointer that can point to either kernel or userspace memory. */
#ifndef _LINUX_BPFPTR_H
#define _LINUX_BPFPTR_H

#include <linux/mm.h>
#include <linux/sockptr.h>

typedef sockptr_t bpfptr_t;

static inline bool bpfptr_is_kernel(bpfptr_t bpfptr)
{
	return bpfptr.is_kernel;
}

static inline bpfptr_t KERNEL_BPFPTR(void *p)
{
	return (bpfptr_t) { .kernel = p, .is_kernel = true };
}

static inline bpfptr_t USER_BPFPTR(void __user *p)
{
	return (bpfptr_t) { .user = p };
}

static inline bpfptr_t make_bpfptr(u64 addr, bool is_kernel)
{
	if (is_kernel)
		return KERNEL_BPFPTR((void*) (uintptr_t) addr);
	else
		return USER_BPFPTR(u64_to_user_ptr(addr));
}

static inline bool bpfptr_is_null(bpfptr_t bpfptr)
{
	if (bpfptr_is_kernel(bpfptr))
		return !bpfptr.kernel;
	return !bpfptr.user;
}

static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val)
{
	if (bpfptr_is_kernel(*bpfptr))
		bpfptr->kernel += val;
	else
		bpfptr->user += val;
}

static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src,
					  size_t offset, size_t size)
{
	if (!bpfptr_is_kernel(src))
		return copy_from_user(dst, src.user + offset, size);
	return copy_from_kernel_nofault(dst, src.kernel + offset, size);
}

static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size)
{
	return copy_from_bpfptr_offset(dst, src, 0, size);
}

static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
					const void *src, size_t size)
{
	return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size);
}

static inline void *kvmemdup_bpfptr_noprof(bpfptr_t src, size_t len)
{
	void *p = kvmalloc_noprof(len, GFP_USER | __GFP_NOWARN);

	if (!p)
		return ERR_PTR(-ENOMEM);
	if (copy_from_bpfptr(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}
	return p;
}
#define kvmemdup_bpfptr(...)	alloc_hooks(kvmemdup_bpfptr_noprof(__VA_ARGS__))

static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
{
	if (bpfptr_is_kernel(src))
		return strncpy_from_kernel_nofault(dst, src.kernel, count);
	return strncpy_from_user(dst, src.user, count);
}

#endif /* _LINUX_BPFPTR_H */
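For context, a minimal hypothetical sketch (not part of this header or of the patch) of how a BPF syscall path typically consumes this API: build a bpfptr_t with make_bpfptr() from an address and a flag saying whether the caller is in the kernel, then copy through it. The real users live in places such as kernel/bpf/syscall.c; struct demo_cmd and demo_copy_cmd() below are illustrative names only.

/* Hypothetical caller of the bpfptr API defined above. */
static int demo_copy_cmd(struct demo_cmd *cmd, u64 addr, bool kernel_caller)
{
	bpfptr_t uattr = make_bpfptr(addr, kernel_caller);

	if (bpfptr_is_null(uattr))
		return -EINVAL;
	/* Dispatches to copy_from_user() or copy_from_kernel_nofault() as appropriate. */
	if (copy_from_bpfptr(cmd, uattr, sizeof(*cmd)))
		return -EFAULT;
	return 0;
}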