bpf: Count missed stats in trace_call_bpf
Increase the misses stats in case bpf prog_array execution is skipped because of the recursion check in trace_call_bpf. Add bpf_prog_inc_misses_counters, which increases the misses count for all bpf programs in a bpf_prog_array.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Tested-by: Song Liu <song@kernel.org>
Reviewed-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/bpf/20230920213145.1941596-5-jolsa@kernel.org
This commit is contained in:
parent 3acf8ace68
commit dd8657894c
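For context: the recursion check mentioned in the message is the per-CPU bpf_prog_active counter in trace_call_bpf(). When a bpf program is already running on the current CPU, the whole prog_array is skipped, and with this patch every program in it gets its misses counter bumped. A simplified sketch of that gate (paraphrased, not the verbatim kernel source):

	cant_sleep();
	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/* Some bpf program is already running on this CPU, so
		 * don't call into another one; just account the miss
		 * for every program in the array and bail out.
		 */
		ret = 0;
		goto out;
	}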
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2922,6 +2922,22 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
 #endif /* CONFIG_BPF_SYSCALL */
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
 
+static __always_inline void
+bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
+{
+	const struct bpf_prog_array_item *item;
+	struct bpf_prog *prog;
+
+	if (unlikely(!array))
+		return;
+
+	item = &array->items[0];
+	while ((prog = READ_ONCE(item->prog))) {
+		bpf_prog_inc_misses_counter(prog);
+		item++;
+	}
+}
+
 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
 void bpf_sk_reuseport_detach(struct sock *sk);
 int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
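The loop depends on bpf_prog_array being NULL-terminated, and READ_ONCE() pairs with the writers that publish array slots concurrently. For reference, the existing per-program helper it calls bumps a per-CPU counter under a u64_stats sequence; roughly (paraphrased from include/linux/bpf.h, not verbatim):

	static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
	{
		struct bpf_prog_stats *stats;
		unsigned long flags;

		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->misses);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}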
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -117,6 +117,9 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 		 * and don't send kprobe event into ring-buffer,
 		 * so return zero here
 		 */
+		rcu_read_lock();
+		bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
+		rcu_read_unlock();
 		ret = 0;
 		goto out;
 	}
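The counter bumped on this path is exported to user space as recursion_misses in struct bpf_prog_info (bpftool prog show reports it as well). A minimal user-space sketch for reading it with libbpf, assuming a prog_fd obtained elsewhere (prog_fd and print_misses are illustrative names, not part of this patch):

	#include <stdio.h>
	#include <string.h>
	#include <bpf/bpf.h>

	static int print_misses(int prog_fd)
	{
		struct bpf_prog_info info;
		__u32 len = sizeof(info);

		memset(&info, 0, sizeof(info));
		if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
			return -1;

		/* recursion_misses accumulates the skips counted above */
		printf("prog %u recursion_misses: %llu\n", info.id,
		       (unsigned long long)info.recursion_misses);
		return 0;
	}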