Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
perf stat: Move create_perf_stat_counter() to stat.c
Move create_perf_stat_counter() to the 'stat' class, so that we can use
it globally.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20180830063252.23729-9-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 650d622046
commit d09cefd2ef
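With the helper exported from util/stat.c, a perf tool only needs a filled-in struct perf_stat_config and struct target to open its counters. A minimal sketch of that usage follows; it mirrors the call site in __run_perf_stat() shown in the diff below, but the open_counters() wrapper name and its error policy are invented for illustration, and it assumes the perf-internal headers (util/evlist.h, util/evsel.h, util/stat.h, util/target.h) are available:

/* Sketch only: open one counter per event in the evlist via the
 * now-global helper; wrapper name and error handling are illustrative. */
static int open_counters(struct perf_evlist *evlist,
			 struct perf_stat_config *config,
			 struct target *target)
{
	struct perf_evsel *counter;

	evlist__for_each_entry(evlist, counter) {
		/* create_perf_stat_counter() now takes the target explicitly
		 * instead of relying on builtin-stat.c's file-local 'target'. */
		if (create_perf_stat_counter(counter, config, target) < 0)
			return -1;	/* caller can inspect errno, as __run_perf_stat() does */
	}
	return 0;
}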
tools/perf/builtin-stat.c

@@ -234,58 +234,6 @@ static void perf_stat__reset_stats(void)
 		perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
 }
 
-static int create_perf_stat_counter(struct perf_evsel *evsel,
-				     struct perf_stat_config *config)
-{
-	struct perf_event_attr *attr = &evsel->attr;
-	struct perf_evsel *leader = evsel->leader;
-
-	if (config->scale) {
-		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
-				    PERF_FORMAT_TOTAL_TIME_RUNNING;
-	}
-
-	/*
-	 * The event is part of non trivial group, let's enable
-	 * the group read (for leader) and ID retrieval for all
-	 * members.
-	 */
-	if (leader->nr_members > 1)
-		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
-
-	attr->inherit = !config->no_inherit;
-
-	/*
-	 * Some events get initialized with sample_(period/type) set,
-	 * like tracepoints. Clear it up for counting.
-	 */
-	attr->sample_period = 0;
-
-	if (config->identifier)
-		attr->sample_type = PERF_SAMPLE_IDENTIFIER;
-
-	/*
-	 * Disabling all counters initially, they will be enabled
-	 * either manually by us or by kernel via enable_on_exec
-	 * set later.
-	 */
-	if (perf_evsel__is_group_leader(evsel)) {
-		attr->disabled = 1;
-
-		/*
-		 * In case of initial_delay we enable tracee
-		 * events manually.
-		 */
-		if (target__none(&target) && !config->initial_delay)
-			attr->enable_on_exec = 1;
-	}
-
-	if (target__has_cpu(&target) && !target__has_per_thread(&target))
-		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
-
-	return perf_evsel__open_per_thread(evsel, evsel->threads);
-}
-
 static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
 				     union perf_event *event,
 				     struct perf_sample *sample __maybe_unused,
@@ -568,7 +516,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 
 	evlist__for_each_entry(evsel_list, counter) {
 try_again:
-		if (create_perf_stat_counter(counter, &stat_config) < 0) {
+		if (create_perf_stat_counter(counter, &stat_config, &target) < 0) {
 
 			/* Weak group failed. Reset the group. */
 			if ((errno == EINVAL || errno == EBADF) &&
tools/perf/util/stat.c

@@ -435,3 +435,56 @@ size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
 
 	return ret;
 }
+
+int create_perf_stat_counter(struct perf_evsel *evsel,
+			     struct perf_stat_config *config,
+			     struct target *target)
+{
+	struct perf_event_attr *attr = &evsel->attr;
+	struct perf_evsel *leader = evsel->leader;
+
+	if (config->scale) {
+		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+				    PERF_FORMAT_TOTAL_TIME_RUNNING;
+	}
+
+	/*
+	 * The event is part of non trivial group, let's enable
+	 * the group read (for leader) and ID retrieval for all
+	 * members.
+	 */
+	if (leader->nr_members > 1)
+		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;
+
+	attr->inherit = !config->no_inherit;
+
+	/*
+	 * Some events get initialized with sample_(period/type) set,
+	 * like tracepoints. Clear it up for counting.
+	 */
+	attr->sample_period = 0;
+
+	if (config->identifier)
+		attr->sample_type = PERF_SAMPLE_IDENTIFIER;
+
+	/*
+	 * Disabling all counters initially, they will be enabled
+	 * either manually by us or by kernel via enable_on_exec
+	 * set later.
+	 */
+	if (perf_evsel__is_group_leader(evsel)) {
+		attr->disabled = 1;
+
+		/*
+		 * In case of initial_delay we enable tracee
+		 * events manually.
+		 */
+		if (target__none(target) && !config->initial_delay)
+			attr->enable_on_exec = 1;
+	}
+
+	if (target__has_cpu(target) && !target__has_per_thread(target))
+		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
+
+	return perf_evsel__open_per_thread(evsel, evsel->threads);
+}
tools/perf/util/stat.h

@@ -175,4 +175,8 @@ int perf_event__process_stat_event(struct perf_tool *tool,
 size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);
+
+int create_perf_stat_counter(struct perf_evsel *evsel,
+			     struct perf_stat_config *config,
+			     struct target *target);
 #endif