Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git, synced 2025-01-06 05:06:29 +00:00.
perf/bpf: Call BPF handler directly, not through overflow machinery
To ultimately allow BPF programs attached to perf events to completely
suppress all of the effects of a perf event overflow (rather than just the
sample output, as they do today), call bpf_overflow_handler() from
__perf_event_overflow() directly rather than modifying struct perf_event's
overflow_handler. Return the BPF program's return value from
bpf_overflow_handler() so that __perf_event_overflow() knows how to
proceed. Remove the now unnecessary orig_overflow_handler from struct
perf_event.

This patch is solely a refactoring and results in no behavior change.

Suggested-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Kyle Huey <khuey@kylehuey.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Song Liu <song@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20240412015019.7060-5-khuey@kylehuey.com
This commit is contained in:
parent
14e40a9578
commit
f11f10bfa1
@@ -809,7 +809,6 @@ struct perf_event {
 	u64				(*clock)(void);
 	perf_overflow_handler_t		overflow_handler;
 	void				*overflow_handler_context;
-	perf_overflow_handler_t		orig_overflow_handler;
 	struct bpf_prog			*prog;
 	u64				bpf_cookie;
 
@@ -1361,10 +1360,7 @@ __is_default_overflow_handler(perf_overflow_handler_t overflow_handler)
 #ifdef CONFIG_BPF_SYSCALL
 static inline bool uses_default_overflow_handler(struct perf_event *event)
 {
-	if (likely(is_default_overflow_handler(event)))
-		return true;
-
-	return __is_default_overflow_handler(event->orig_overflow_handler);
+	return is_default_overflow_handler(event);
 }
 #else
 #define uses_default_overflow_handler(event) \
@@ -9564,9 +9564,9 @@ static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *r
 }
 
 #ifdef CONFIG_BPF_SYSCALL
-static void bpf_overflow_handler(struct perf_event *event,
-				 struct perf_sample_data *data,
-				 struct pt_regs *regs)
+static int bpf_overflow_handler(struct perf_event *event,
+				struct perf_sample_data *data,
+				struct pt_regs *regs)
 {
 	struct bpf_perf_event_data_kern ctx = {
 		.data = data,
@@ -9587,10 +9587,8 @@ static void bpf_overflow_handler(struct perf_event *event,
 	rcu_read_unlock();
 out:
 	__this_cpu_dec(bpf_prog_active);
-	if (!ret)
-		return;
 
-	event->orig_overflow_handler(event, data, regs);
+	return ret;
 }
 
 static int perf_event_set_bpf_handler(struct perf_event *event,
@@ -9626,8 +9624,6 @@ static int perf_event_set_bpf_handler(struct perf_event *event,
 
 	event->prog = prog;
 	event->bpf_cookie = bpf_cookie;
-	event->orig_overflow_handler = READ_ONCE(event->overflow_handler);
-	WRITE_ONCE(event->overflow_handler, bpf_overflow_handler);
 	return 0;
 }
 
@@ -9638,15 +9634,15 @@ static void perf_event_free_bpf_handler(struct perf_event *event)
 	if (!prog)
 		return;
 
-	WRITE_ONCE(event->overflow_handler, event->orig_overflow_handler);
 	event->prog = NULL;
 	bpf_prog_put(prog);
 }
 #else
-static void bpf_overflow_handler(struct perf_event *event,
-				 struct perf_sample_data *data,
-				 struct pt_regs *regs)
+static int bpf_overflow_handler(struct perf_event *event,
+				struct perf_sample_data *data,
+				struct pt_regs *regs)
 {
+	return 1;
 }
 
 static int perf_event_set_bpf_handler(struct perf_event *event,
@@ -9730,7 +9726,8 @@ static int __perf_event_overflow(struct perf_event *event,
 		irq_work_queue(&event->pending_irq);
 	}
 
-	READ_ONCE(event->overflow_handler)(event, data, regs);
+	if (!(event->prog && !bpf_overflow_handler(event, data, regs)))
+		READ_ONCE(event->overflow_handler)(event, data, regs);
 
 	if (*perf_event_fasync(event) && event->pending_kill) {
 		event->pending_wakeup = 1;
@@ -11997,13 +11994,11 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		overflow_handler = parent_event->overflow_handler;
 		context = parent_event->overflow_handler_context;
 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_EVENT_TRACING)
-		if (overflow_handler == bpf_overflow_handler) {
+		if (parent_event->prog) {
 			struct bpf_prog *prog = parent_event->prog;
 
 			bpf_prog_inc(prog);
 			event->prog = prog;
-			event->orig_overflow_handler =
-				parent_event->orig_overflow_handler;
 		}
 #endif
 	}
Loading…
Reference in New Issue
Block a user