Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-16 21:35:07 +00:00)
perf bpf-filter: Pass 'target' to perf_bpf_filter__prepare()
This is needed to prepare target-specific actions in a later patch. We want to reuse the pinned BPF program and map so that regular users can profile their own processes.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <song@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: https://lore.kernel.org/r/20240703223035.2024586-3-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent edb08cdd10
commit 966854e72f
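The change itself is plumbing: evlist__apply_filters() and perf_bpf_filter__prepare() grow a 'struct target *' parameter, and each call site (the record, stat, top and trace tools, judging by the hunks below) passes its own target description down the chain. As a rough illustration of that data flow, here is a minimal, self-contained C sketch; the struct layout, field names and function bodies are invented stand-ins, and only the shape of the call chain mirrors the patch.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for perf's 'struct target' (fields are illustrative only). */
struct target {
	const char *pid;
	bool system_wide;
};

/* Stand-in evsel: just enough state for the sketch. */
struct evsel {
	const char *name;
	bool has_bpf_filter;
};

/*
 * After the patch, the prepare step can see who is being profiled, so a
 * follow-up change may e.g. reuse a pinned BPF program/map when a regular
 * user profiles their own processes instead of loading a fresh one.
 */
static int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target)
{
	printf("prepare BPF filter for %s (system_wide=%d, pid=%s)\n",
	       evsel->name, target->system_wide,
	       target->pid ? target->pid : "none");
	return 0;
}

/* evlist__apply_filters() only forwards the target it was handed. */
static int evlist__apply_filters(struct evsel *evsels, int nr,
				 struct evsel **err_evsel, struct target *target)
{
	for (int i = 0; i < nr; i++) {
		if (!evsels[i].has_bpf_filter)
			continue;

		int err = perf_bpf_filter__prepare(&evsels[i], target);
		if (err) {
			*err_evsel = &evsels[i];
			return err;
		}
	}
	return 0;
}

int main(void)
{
	/* A tool like 'perf record' would pass &opts->target here. */
	struct target target = { .pid = "1234", .system_wide = false };
	struct evsel evsels[] = {
		{ .name = "cycles",       .has_bpf_filter = true  },
		{ .name = "instructions", .has_bpf_filter = false },
	};
	struct evsel *bad = NULL;

	if (evlist__apply_filters(evsels, 2, &bad, &target))
		fprintf(stderr, "failed to set filter on %s: %s\n",
			bad->name, strerror(errno));
	return 0;
}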
@@ -1389,7 +1389,7 @@ try_again:
 				"even with a suitable vmlinux or kallsyms file.\n\n");
 	}
 
-	if (evlist__apply_filters(evlist, &pos)) {
+	if (evlist__apply_filters(evlist, &pos, &opts->target)) {
 		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
 			pos->filter ?: "BPF", evsel__name(pos), errno,
 			str_error_r(errno, msg, sizeof(msg)));
@@ -833,7 +833,7 @@ try_again_reset:
 			return -1;
 		}
 
-	if (evlist__apply_filters(evsel_list, &counter)) {
+	if (evlist__apply_filters(evsel_list, &counter, &target)) {
 		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
 			counter->filter, evsel__name(counter), errno,
 			str_error_r(errno, msg, sizeof(msg)));
@@ -1055,7 +1055,7 @@ try_again:
 		}
 	}
 
-	if (evlist__apply_filters(evlist, &counter)) {
+	if (evlist__apply_filters(evlist, &counter, &opts->target)) {
 		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
 			counter->filter ?: "BPF", evsel__name(counter), errno,
 			str_error_r(errno, msg, sizeof(msg)));
@@ -4135,7 +4135,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 	err = trace__expand_filters(trace, &evsel);
 	if (err)
 		goto out_delete_evlist;
-	err = evlist__apply_filters(evlist, &evsel);
+	err = evlist__apply_filters(evlist, &evsel, &trace->opts.target);
 	if (err < 0)
 		goto out_error_apply_filters;
 
@@ -91,7 +91,7 @@ static int check_sample_flags(struct evsel *evsel, struct perf_bpf_filter_expr *
 		return -1;
 	}
 
-int perf_bpf_filter__prepare(struct evsel *evsel)
+int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target __maybe_unused)
 {
 	int i, x, y, fd, ret;
 	struct sample_filter_bpf *skel;
@@ -16,6 +16,7 @@ struct perf_bpf_filter_expr {
 };
 
 struct evsel;
+struct target;
 
 #ifdef HAVE_BPF_SKEL
 struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(enum perf_bpf_filter_term term,
@@ -23,7 +24,7 @@ struct perf_bpf_filter_expr *perf_bpf_filter_expr__new(enum perf_bpf_filter_term
 						       enum perf_bpf_filter_op op,
 						       unsigned long val);
 int perf_bpf_filter__parse(struct list_head *expr_head, const char *str);
-int perf_bpf_filter__prepare(struct evsel *evsel);
+int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target);
 int perf_bpf_filter__destroy(struct evsel *evsel);
 u64 perf_bpf_filter__lost_count(struct evsel *evsel);
 
@@ -34,7 +35,8 @@ static inline int perf_bpf_filter__parse(struct list_head *expr_head __maybe_unu
 {
 	return -EOPNOTSUPP;
 }
-static inline int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused)
+static inline int perf_bpf_filter__prepare(struct evsel *evsel __maybe_unused,
+					   struct target *target __maybe_unused)
 {
 	return -EOPNOTSUPP;
 }
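The header hunks above change the real prototype and the !HAVE_BPF_SKEL inline stub in lockstep, so callers compile either way and simply get -EOPNOTSUPP at run time when BPF skeleton support is compiled out. A small standalone sketch of that compile-out pattern follows; __maybe_unused is a perf helper macro, so the sketch silences the unused parameters with (void) casts instead.

#include <errno.h>
#include <stdio.h>

struct evsel;	/* opaque in this sketch */
struct target;	/* forward declaration, as the patch adds to the header */

#ifdef HAVE_BPF_SKEL
/* The real implementation would live in a .c file built with BPF support. */
int perf_bpf_filter__prepare(struct evsel *evsel, struct target *target);
#else
/* Fallback: callers still compile, but get -EOPNOTSUPP at run time. */
static inline int perf_bpf_filter__prepare(struct evsel *evsel,
					   struct target *target)
{
	(void)evsel;
	(void)target;
	return -EOPNOTSUPP;
}
#endif

int main(void)
{
	/* Built without -DHAVE_BPF_SKEL, this prints -95 (EOPNOTSUPP). */
	printf("%d\n", perf_bpf_filter__prepare(NULL, NULL));
	return 0;
}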
@@ -1086,7 +1086,8 @@ out_delete_threads:
 	return -1;
 }
 
-int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
+int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel,
+			  struct target *target)
 {
 	struct evsel *evsel;
 	int err = 0;
@@ -1108,7 +1109,7 @@ int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
 		 * non-tracepoint events can have BPF filters.
 		 */
 		if (!list_empty(&evsel->bpf_filters)) {
-			err = perf_bpf_filter__prepare(evsel);
+			err = perf_bpf_filter__prepare(evsel, target);
 			if (err) {
 				*err_evsel = evsel;
 				break;
@@ -20,6 +20,7 @@ struct pollfd;
 struct thread_map;
 struct perf_cpu_map;
 struct record_opts;
+struct target;
 
 /*
  * State machine of bkw_mmap_state:
@@ -212,7 +213,8 @@ void evlist__enable_non_dummy(struct evlist *evlist);
 void evlist__set_selected(struct evlist *evlist, struct evsel *evsel);
 
 int evlist__create_maps(struct evlist *evlist, struct target *target);
-int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel);
+int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel,
+			  struct target *target);
 
 u64 __evlist__combined_sample_type(struct evlist *evlist);
 u64 evlist__combined_sample_type(struct evlist *evlist);