mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
commit 0f223813ed
The 'perf ftrace profile' command collects function execution profiles using the function-graph tracer, so that users can easily see the total, average, and max execution times as well as the number of invocations. The following is a profile for the perf_event_open syscall.

  $ sudo perf ftrace profile -G __x64_sys_perf_event_open -- \
    perf stat -e cycles -C1 true 2> /dev/null | head
  # Total (us)   Avg (us)   Max (us)   Count   Function
       65.611     65.611     65.611        1   __x64_sys_perf_event_open
       30.527     30.527     30.527        1   anon_inode_getfile
       30.260     30.260     30.260        1   __anon_inode_getfile
       29.700     29.700     29.700        1   alloc_file_pseudo
       17.578     17.578     17.578        1   d_alloc_pseudo
       17.382     17.382     17.382        1   __d_alloc
       16.738     16.738     16.738        1   kmem_cache_alloc_lru
       15.686     15.686     15.686        1   perf_event_alloc
       14.012      7.006     11.264        2   obj_cgroup_charge
  #

Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Changbin Du <changbin.du@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Link: https://lore.kernel.org/lkml/20240729004127.238611-4-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
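The Total/Avg/Max/Count columns boil down to a small per-function accumulator. The sketch below is illustrative only, not the actual builtin-ftrace.c code: the struct and helper names are made up here, and they only stand in for the kind of per-function state the command keeps behind the profile_hash member declared in the header that follows.

	/* Hypothetical per-function accumulator (names are illustrative). */
	struct func_stats {
		double        total_us;   /* sum of all invocation durations */
		double        max_us;     /* longest single invocation */
		unsigned long count;      /* number of invocations */
	};

	static void func_stats__update(struct func_stats *st, double duration_us)
	{
		st->total_us += duration_us;
		if (duration_us > st->max_us)
			st->max_us = duration_us;
		st->count++;
		/* the Avg column is derived at report time as total_us / count */
	}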
85 lines
1.8 KiB
C
#ifndef __PERF_FTRACE_H__
#define __PERF_FTRACE_H__

#include <linux/list.h>

#include "target.h"

struct evlist;
struct hashmap;

struct perf_ftrace {
	struct evlist		*evlist;
	struct target		target;
	const char		*tracer;
	struct list_head	filters;
	struct list_head	notrace;
	struct list_head	graph_funcs;
	struct list_head	nograph_funcs;
	struct hashmap		*profile_hash;
	unsigned long		percpu_buffer_size;
	bool			inherit;
	bool			use_nsec;
	int			graph_depth;
	int			func_stack_trace;
	int			func_irq_info;
	int			graph_nosleep_time;
	int			graph_noirqs;
	int			graph_verbose;
	int			graph_thresh;
	int			graph_tail;
};

struct filter_entry {
	struct list_head	list;
	char			name[];
};

#define NUM_BUCKET  22  /* 20 + 2 (for outliers in both direction) */

#ifdef HAVE_BPF_SKEL

int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace);
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace,
				  int buckets[]);
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace);

#else  /* !HAVE_BPF_SKEL */

static inline int
perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
			      int buckets[] __maybe_unused)
{
	return -1;
}

static inline int
perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	return -1;
}

#endif  /* HAVE_BPF_SKEL */

#endif  /* __PERF_FTRACE_H__ */
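For reference, here is a minimal sketch of how a caller might drive the latency BPF API declared above. The control flow is illustrative rather than the actual builtin-ftrace.c code; only the function names and NUM_BUCKET come from this header. When perf is built without HAVE_BPF_SKEL, the static inline stubs make every call return -1, so the same caller compiles unchanged and simply reports that BPF-based latency tracking is unavailable.

	/* Illustrative caller, assuming "util/ftrace.h" resolves to this header. */
	#include "util/ftrace.h"

	static int latency_run(struct perf_ftrace *ftrace)
	{
		int buckets[NUM_BUCKET] = { 0 };
		int ret = -1;

		if (perf_ftrace__latency_prepare_bpf(ftrace) < 0)
			return -1;	/* also taken when built without HAVE_BPF_SKEL */

		if (perf_ftrace__latency_start_bpf(ftrace) < 0)
			goto out;

		/* ... run the workload being measured ... */

		perf_ftrace__latency_stop_bpf(ftrace);
		ret = perf_ftrace__latency_read_bpf(ftrace, buckets);
		/* on success, buckets[] holds the latency histogram counts */
	out:
		perf_ftrace__latency_cleanup_bpf(ftrace);
		return ret;
	}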