linux-next/include/linux/hid_bpf.h
Suren Baghdasaryan 2c321f3f70 mm: change inlined allocation helpers to account at the call site
The main goal of the memory allocation profiling patchset is to provide
accounting that is cheap enough to run in production.  To achieve that,
we inject counters using codetags at the allocation call sites to account
for every allocation that is made.  This injection lets us perform the
accounting efficiently because the injected counters are immediately
available, as opposed to alternative methods such as using _RET_IP_,
which would require a counter lookup and the locking that goes with it,
making accounting much more expensive.  This method requires all
allocation functions to inject separate counters at their call sites so
that their callers can be accounted individually.  Counter injection is
implemented by allocation hooks, which should wrap all allocation
functions.

Inlined functions which perform allocations but do not use allocation
hooks are directly charged for the allocations they perform.  In most
cases these functions are just specialized allocation wrappers used from
multiple places to allocate objects of a specific type, so it would be
more useful to do the accounting at their call sites instead.  Instrument
these helpers to account at the call site: simple inlined allocation
wrappers are converted directly into macros, while more complex
allocators, or allocators with documentation, are converted into _noprof
versions with allocation hooks added on top.  This allows the memory
allocation profiling mechanism to charge allocations to the callers of
these functions.
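
For example (get_obj and struct obj are hypothetical; kmalloc_noprof()
and alloc_hooks() follow the conventions introduced by this series), a
trivial wrapper simply becomes a macro so the accounting lands in its
caller:

	/* before: the wrapper itself is charged for the allocation */
	static inline struct obj *get_obj(gfp_t gfp)
	{
		return kmalloc(sizeof(struct obj), gfp);
	}

	/* after: converted to a macro, the caller is charged */
	#define get_obj(gfp)	kmalloc(sizeof(struct obj), (gfp))

while a more complex or documented helper keeps its body as a _noprof
variant and gains an allocation hook at the macro layer:

	static inline struct obj *get_obj_noprof(gfp_t gfp)
	{
		return kmalloc_noprof(sizeof(struct obj), gfp);
	}
	#define get_obj(...)	alloc_hooks(get_obj_noprof(__VA_ARGS__))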

Link: https://lkml.kernel.org/r/20240415020731.1152108-1-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Jan Kara <jack@suse.cz>		[jbd2]
Cc: Anna Schumaker <anna@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jakub Sitnicki <jakub@cloudflare.com>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2024-04-25 20:55:59 -07:00

/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __HID_BPF_H
#define __HID_BPF_H
#include <linux/bpf.h>
#include <linux/spinlock.h>
#include <uapi/linux/hid.h>
struct hid_device;
/*
 * The following is the user-facing HID BPF API.
 *
 * Extra care should be taken when editing this part, as
 * it might break existing out-of-tree BPF programs.
 */
/**
 * struct hid_bpf_ctx - User accessible data for all HID programs
 *
 * ``data`` is not directly accessible from the context. We need to issue
 * a call to ``hid_bpf_get_data()`` in order to get a pointer to that field.
 *
 * All of these fields are currently read-only.
 *
 * @index: program index in the jump table. No special meaning (a smaller index
 *         doesn't mean the program will be executed before another program with
 *         a bigger index).
 * @hid: the ``struct hid_device`` representing the device itself
 * @report_type: used for ``hid_bpf_device_event()``
 * @allocated_size: Allocated size of data.
 *
 *                  This is how much memory is available and can be requested
 *                  by the HID program.
 *                  Note that for ``HID_BPF_RDESC_FIXUP``, that memory is set to
 *                  ``4096`` (4 KB).
 * @size: Valid data in the data field.
 *
 *        Programs can get the available valid size in data by fetching this field.
 *        Programs can also change this value by returning a positive number in the
 *        program.
 *        To discard the event, return a negative error code.
 *
 *        ``size`` must always be less than or equal to ``allocated_size`` (it is
 *        enforced once all BPF programs have been run).
 * @retval: Return value of the previous program.
 */
struct hid_bpf_ctx {
	__u32 index;
	const struct hid_device *hid;
	__u32 allocated_size;
	enum hid_report_type report_type;
	union {
		__s32 retval;
		__s32 size;
	};
};
/**
 * enum hid_bpf_attach_flags - flags used when attaching a HID-BPF program
 *
 * @HID_BPF_FLAG_NONE: no specific flag is used, the kernel chooses where to
 *                     insert the program
 * @HID_BPF_FLAG_INSERT_HEAD: insert the given program before any other program
 *                            currently attached to the device. This doesn't
 *                            guarantee that this program will always be first
 * @HID_BPF_FLAG_MAX: sentinel value, not to be used by the callers
 */
enum hid_bpf_attach_flags {
	HID_BPF_FLAG_NONE = 0,
	HID_BPF_FLAG_INSERT_HEAD = _BITUL(0),
	HID_BPF_FLAG_MAX,
};
/* Following functions are tracepoints that BPF programs can attach to */
int hid_bpf_device_event(struct hid_bpf_ctx *ctx);
int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx);
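/*
 * Usage sketch: a device event program, following the fmod_ret attach type
 * and the hid_bpf_get_data() kfunc used by the in-tree samples and selftests
 * (the program name and the byte values below are illustrative only):
 *
 *	SEC("fmod_ret/hid_bpf_device_event")
 *	int BPF_PROG(example_filter_event, struct hid_bpf_ctx *hctx)
 *	{
 *		__u8 *data = hid_bpf_get_data(hctx, 0, 4);
 *
 *		if (!data)
 *			return 0;	// EPERM check failed, pass the event on
 *
 *		if (data[1] == 0x42)
 *			return -1;	// negative return discards the event
 *
 *		return 0;		// 0: forward the event unmodified
 *	}
 */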
/*
 * Below is HID internal
 */
/* internal function to call eBPF programs, not to be used by anybody */
int __hid_bpf_tail_call(struct hid_bpf_ctx *ctx);
#define HID_BPF_MAX_PROGS_PER_DEV 64
#define HID_BPF_FLAG_MASK (((HID_BPF_FLAG_MAX - 1) << 1) - 1)
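/*
 * HID_BPF_FLAG_MAX sits right after the last flag bit, so the expression
 * above covers every defined flag: with HID_BPF_FLAG_INSERT_HEAD == _BITUL(0),
 * HID_BPF_FLAG_MAX == 2 and the mask evaluates to ((2 - 1) << 1) - 1 == 0x1.
 */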
/* types of HID programs to attach to */
enum hid_bpf_prog_type {
	HID_BPF_PROG_TYPE_UNDEF = -1,
	HID_BPF_PROG_TYPE_DEVICE_EVENT,		/* an event is emitted from the device */
	HID_BPF_PROG_TYPE_RDESC_FIXUP,
	HID_BPF_PROG_TYPE_MAX,
};
struct hid_report_enum;
struct hid_bpf_ops {
	struct hid_report *(*hid_get_report)(struct hid_report_enum *report_enum, const u8 *data);
	int (*hid_hw_raw_request)(struct hid_device *hdev,
				  unsigned char reportnum, __u8 *buf,
				  size_t len, enum hid_report_type rtype,
				  enum hid_class_request reqtype);
	struct module *owner;
	const struct bus_type *bus_type;
};
extern struct hid_bpf_ops *hid_bpf_ops;
struct hid_bpf_prog_list {
	u16 prog_idx[HID_BPF_MAX_PROGS_PER_DEV];
	u8 prog_cnt;
};
/* stored in each device */
struct hid_bpf {
	u8 *device_data;		/* allocated when a bpf program of type
					 * SEC(f.../hid_bpf_device_event) has been attached
					 * to this HID device
					 */
	u32 allocated_data;
	struct hid_bpf_prog_list __rcu *progs[HID_BPF_PROG_TYPE_MAX];	/* attached BPF progs */
	bool destroyed;			/* prevents the assignment of any progs */
	spinlock_t progs_lock;		/* protects RCU update of progs */
};
/* specific HID-BPF link when a program is attached to a device */
struct hid_bpf_link {
	struct bpf_link link;
	int hid_table_index;
};
#ifdef CONFIG_HID_BPF
u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data,
				  u32 *size, int interrupt);
int hid_bpf_connect_device(struct hid_device *hdev);
void hid_bpf_disconnect_device(struct hid_device *hdev);
void hid_bpf_destroy_device(struct hid_device *hid);
void hid_bpf_device_init(struct hid_device *hid);
u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size);
#else /* CONFIG_HID_BPF */
static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type,
						u8 *data, u32 *size, int interrupt) { return data; }
static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; }
static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {}
static inline void hid_bpf_destroy_device(struct hid_device *hid) {}
static inline void hid_bpf_device_init(struct hid_device *hid) {}
#define call_hid_bpf_rdesc_fixup(_hdev, _rdesc, _size)	\
	((u8 *)kmemdup(_rdesc, *(_size), GFP_KERNEL))
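/*
 * Note that the stub above still duplicates the report descriptor, so callers
 * receive a freshly allocated buffer they own in both configurations, mirroring
 * the behaviour of the CONFIG_HID_BPF implementation.
 */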
#endif /* CONFIG_HID_BPF */
#endif /* __HID_BPF_H */