perf stat: Remove saved_value/runtime_stat

As saved_value/runtime_stat are only written to and not read, remove
the associated logic and writes.
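
For context: earlier patches in this series moved the shadow-metric printing
code over to reading counts straight from each evsel's aggregated stats, so
the rbtree kept in rt_stat is only ever written. A rough, simplified sketch
of the surviving reader path (approximate shape only, not a verbatim copy of
tools/perf/util/stat-shadow.c) looks like:

	/*
	 * Illustrative sketch: metrics are computed from the aggregated
	 * counts already stored on each evsel, so nothing needs the old
	 * saved_value rbtree any more.
	 */
	static double find_stat(const struct evsel *evsel, int aggr_idx,
				enum stat_type type)
	{
		struct evsel *cur;

		evlist__for_each_entry(evsel->evlist, cur) {
			/* Match the kind of event (cycles, instructions, ...). */
			if (evsel__stat_type(cur) != type)
				continue;
			/* Stay within the same cgroup as the metric's event. */
			if (cur->cgrp != evsel->cgrp)
				continue;
			return cur->stats->aggr[aggr_idx].counts.val;
		}
		return 0.0;
	}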

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexandre Torgue <alexandre.torgue@foss.st.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Caleb Biggers <caleb.biggers@intel.com>
Cc: Eduard Zingerman <eddyz87@gmail.com>
Cc: Florian Fischer <florian.fischer@muhq.space>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Maxime Coquelin <mcoquelin.stm32@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Perry Taylor <perry.taylor@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-stm32@st-md-mailman.stormreply.com
Link: https://lore.kernel.org/r/20230219092848.639226-52-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Commit aa0964e3ec (parent 0a57b91080)
Author: Ian Rogers, 2023-02-19 01:28:48 -08:00; committed by Arnaldo Carvalho de Melo
7 files changed, 0 insertions(+), 239 deletions(-)


@@ -2072,9 +2072,6 @@ static void perf_sample__fprint_metric(struct perf_script *script,
 	if (evsel_script(leader)->gnum++ == 0)
 		perf_stat__reset_shadow_stats();
 	val = sample->period * evsel->scale;
-	perf_stat__update_shadow_stats(evsel,
-				       val,
-				       sample->cpu);
 	evsel_script(evsel)->val = val;
 	if (evsel_script(leader)->gnum == leader->core.nr_members) {
 		for_each_group_member (ev2, leader) {
@@ -2792,8 +2789,6 @@ static int __cmd_script(struct perf_script *script)
 	signal(SIGINT, sig_handler);
 
-	perf_stat__init_shadow_stats();
-
 	/* override event processing functions */
 	if (script->show_task_events) {
 		script->tool.comm = process_comm_event;


@@ -424,7 +424,6 @@ static void process_counters(void)
 	perf_stat_merge_counters(&stat_config, evsel_list);
 	perf_stat_process_percore(&stat_config, evsel_list);
-	perf_stat_process_shadow_stats(&stat_config, evsel_list);
 }
 
 static void process_interval(void)
@@ -434,7 +433,6 @@ static void process_interval(void)
 	clock_gettime(CLOCK_MONOTONIC, &ts);
 	diff_timespec(&rs, &ts, &ref_time);
 
-	perf_stat__reset_shadow_per_stat();
 	evlist__reset_aggr_stats(evsel_list);
 
 	if (read_counters(&rs) == 0)
@@ -910,7 +908,6 @@ try_again_reset:
 		evlist__copy_prev_raw_counts(evsel_list);
 		evlist__reset_prev_raw_counts(evsel_list);
 		evlist__reset_aggr_stats(evsel_list);
-		perf_stat__reset_shadow_per_stat();
 	} else {
 		update_stats(&walltime_nsecs_stats, t1 - t0);
 		update_rusage_stats(&ru_stats, &stat_config.ru_data);
@@ -2132,8 +2129,6 @@ static int __cmd_report(int argc, const char **argv)
 		input_name = "perf.data";
 	}
 
-	perf_stat__init_shadow_stats();
-
 	perf_stat.data.path = input_name;
 	perf_stat.data.mode = PERF_DATA_MODE_READ;
@@ -2413,7 +2408,6 @@ int cmd_stat(int argc, const char **argv)
 					  &stat_config.metric_events);
 		zfree(&metrics);
 	}
-	perf_stat__init_shadow_stats();
 
 	if (add_default_attributes())
 		goto out;


@@ -296,7 +296,6 @@ static int test_metric_group(void)
 static int test__parse_metric(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
 {
-	perf_stat__init_shadow_stats();
 	TEST_ASSERT_VAL("IPC failed", test_ipc() == 0);
 	TEST_ASSERT_VAL("frontend failed", test_frontend() == 0);
 	TEST_ASSERT_VAL("DCache_L2 failed", test_dcache_l2() == 0);


@@ -905,7 +905,6 @@ static int test__parsing(struct test_suite *test __maybe_unused,
 {
 	int failures = 0;
 
-	perf_stat__init_shadow_stats();
 	pmu_for_each_core_metric(test__parsing_callback, &failures);
 	pmu_for_each_sys_metric(test__parsing_callback, &failures);


@@ -16,22 +16,9 @@
 #include "iostat.h"
 #include "util/hashmap.h"
 
-/*
- * AGGR_GLOBAL: Use CPU 0
- * AGGR_SOCKET: Use first CPU of socket
- * AGGR_DIE: Use first CPU of die
- * AGGR_CORE: Use first CPU of core
- * AGGR_NONE: Use matching CPU
- * AGGR_THREAD: Not supported?
- */
-
 struct stats walltime_nsecs_stats;
 struct rusage_stats ru_stats;
 
-static struct runtime_stat {
-	struct rblist value_list;
-} rt_stat;
-
 enum {
 	CTX_BIT_USER	= 1 << 0,
 	CTX_BIT_KERNEL	= 1 << 1,
@@ -65,117 +52,6 @@ enum stat_type {
 	STAT_MAX
 };
 
-struct saved_value {
-	struct rb_node rb_node;
-	struct evsel *evsel;
-	enum stat_type type;
-	int ctx;
-	int map_idx;  /* cpu or thread map index */
-	struct cgroup *cgrp;
-	struct stats stats;
-	u64 metric_total;
-	int metric_other;
-};
-
-static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
-{
-	struct saved_value *a = container_of(rb_node,
-					     struct saved_value,
-					     rb_node);
-	const struct saved_value *b = entry;
-
-	if (a->map_idx != b->map_idx)
-		return a->map_idx - b->map_idx;
-
-	/*
-	 * Previously the rbtree was used to link generic metrics.
-	 * The keys were evsel/cpu. Now the rbtree is extended to support
-	 * per-thread shadow stats. For shadow stats case, the keys
-	 * are cpu/type/ctx/stat (evsel is NULL). For generic metrics
-	 * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
-	 */
-	if (a->type != b->type)
-		return a->type - b->type;
-
-	if (a->ctx != b->ctx)
-		return a->ctx - b->ctx;
-
-	if (a->cgrp != b->cgrp)
-		return (char *)a->cgrp < (char *)b->cgrp ? -1 : +1;
-
-	if (a->evsel == b->evsel)
-		return 0;
-	if ((char *)a->evsel < (char *)b->evsel)
-		return -1;
-	return +1;
-}
-
-static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
-				       const void *entry)
-{
-	struct saved_value *nd = malloc(sizeof(struct saved_value));
-
-	if (!nd)
-		return NULL;
-	memcpy(nd, entry, sizeof(struct saved_value));
-	return &nd->rb_node;
-}
-
-static void saved_value_delete(struct rblist *rblist __maybe_unused,
-			       struct rb_node *rb_node)
-{
-	struct saved_value *v;
-
-	BUG_ON(!rb_node);
-	v = container_of(rb_node, struct saved_value, rb_node);
-	free(v);
-}
-
-static struct saved_value *saved_value_lookup(struct evsel *evsel,
-					      int map_idx,
-					      bool create,
-					      enum stat_type type,
-					      int ctx,
-					      struct cgroup *cgrp)
-{
-	struct rblist *rblist;
-	struct rb_node *nd;
-	struct saved_value dm = {
-		.map_idx = map_idx,
-		.evsel = evsel,
-		.type = type,
-		.ctx = ctx,
-		.cgrp = cgrp,
-	};
-
-	rblist = &rt_stat.value_list;
-
-	/* don't use context info for clock events */
-	if (type == STAT_NSECS)
-		dm.ctx = 0;
-
-	nd = rblist__find(rblist, &dm);
-	if (nd)
-		return container_of(nd, struct saved_value, rb_node);
-	if (create) {
-		rblist__add_node(rblist, &dm);
-		nd = rblist__find(rblist, &dm);
-		if (nd)
-			return container_of(nd, struct saved_value, rb_node);
-	}
-	return NULL;
-}
-
-void perf_stat__init_shadow_stats(void)
-{
-	struct rblist *rblist = &rt_stat.value_list;
-
-	rblist__init(rblist);
-	rblist->node_cmp = saved_value_cmp;
-	rblist->node_new = saved_value_new;
-	rblist->node_delete = saved_value_delete;
-}
-
 static int evsel_context(const struct evsel *evsel)
 {
 	int ctx = 0;
@@ -194,86 +70,12 @@ static int evsel_context(const struct evsel *evsel)
 	return ctx;
 }
 
-void perf_stat__reset_shadow_per_stat(void)
-{
-	struct rblist *rblist;
-	struct rb_node *pos, *next;
-
-	rblist = &rt_stat.value_list;
-	next = rb_first_cached(&rblist->entries);
-	while (next) {
-		pos = next;
-		next = rb_next(pos);
-		memset(&container_of(pos, struct saved_value, rb_node)->stats,
-		       0,
-		       sizeof(struct stats));
-	}
-}
-
 void perf_stat__reset_shadow_stats(void)
 {
-	perf_stat__reset_shadow_per_stat();
 	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
 	memset(&ru_stats, 0, sizeof(ru_stats));
 }
 
-struct runtime_stat_data {
-	int ctx;
-	struct cgroup *cgrp;
-};
-
-static void update_runtime_stat(enum stat_type type,
-				int map_idx, u64 count,
-				struct runtime_stat_data *rsd)
-{
-	struct saved_value *v = saved_value_lookup(NULL, map_idx, true, type,
-						   rsd->ctx, rsd->cgrp);
-
-	if (v)
-		update_stats(&v->stats, count);
-}
-
-/*
- * Update various tracking values we maintain to print
- * more semantic information such as miss/hit ratios,
- * instruction rates, etc:
- */
-void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
-				    int aggr_idx)
-{
-	u64 count_ns = count;
-	struct runtime_stat_data rsd = {
-		.ctx = evsel_context(counter),
-		.cgrp = counter->cgrp,
-	};
-
-	count *= counter->scale;
-
-	if (evsel__is_clock(counter))
-		update_runtime_stat(STAT_NSECS, aggr_idx, count_ns, &rsd);
-	else if (evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
-		update_runtime_stat(STAT_CYCLES, aggr_idx, count, &rsd);
-	else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
-		update_runtime_stat(STAT_STALLED_CYCLES_FRONT,
-				    aggr_idx, count, &rsd);
-	else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
-		update_runtime_stat(STAT_STALLED_CYCLES_BACK,
-				    aggr_idx, count, &rsd);
-	else if (evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
-		update_runtime_stat(STAT_BRANCHES, aggr_idx, count, &rsd);
-	else if (evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
-		update_runtime_stat(STAT_CACHE_REFS, aggr_idx, count, &rsd);
-	else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
-		update_runtime_stat(STAT_L1_DCACHE, aggr_idx, count, &rsd);
-	else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
-		update_runtime_stat(STAT_L1_ICACHE, aggr_idx, count, &rsd);
-	else if (evsel__match(counter, HW_CACHE, HW_CACHE_LL))
-		update_runtime_stat(STAT_LL_CACHE, aggr_idx, count, &rsd);
-	else if (evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
-		update_runtime_stat(STAT_DTLB_CACHE, aggr_idx, count, &rsd);
-	else if (evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
-		update_runtime_stat(STAT_ITLB_CACHE, aggr_idx, count, &rsd);
-}
-
 static enum stat_type evsel__stat_type(const struct evsel *evsel)
 {
 	/* Fake perf_hw_cache_op_id values for use with evsel__match. */


@@ -648,30 +648,6 @@ void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *evlist
 		evsel__process_percore(evsel);
 }
 
-static void evsel__update_shadow_stats(struct evsel *evsel)
-{
-	struct perf_stat_evsel *ps = evsel->stats;
-	int aggr_idx;
-
-	if (ps->aggr == NULL)
-		return;
-
-	for (aggr_idx = 0; aggr_idx < ps->nr_aggr; aggr_idx++) {
-		struct perf_counts_values *aggr_counts = &ps->aggr[aggr_idx].counts;
-
-		perf_stat__update_shadow_stats(evsel, aggr_counts->val, aggr_idx);
-	}
-}
-
-void perf_stat_process_shadow_stats(struct perf_stat_config *config __maybe_unused,
-				    struct evlist *evlist)
-{
-	struct evsel *evsel;
-
-	evlist__for_each_entry(evlist, evsel)
-		evsel__update_shadow_stats(evsel);
-}
-
 int perf_event__process_stat_event(struct perf_session *session,
 				   union perf_event *event)
 {
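
The loop deleted above was the last producer of the rbtree: it walked every
evsel and pushed ps->aggr[aggr_idx].counts.val into
perf_stat__update_shadow_stats(). Since the consumers now read those same
aggregated counts directly, a derived value can be computed without any
intermediate store. A hypothetical illustration of that pattern (not code
from the tree):

	/*
	 * Hypothetical illustration: with the saved_value rbtree gone, a
	 * ratio such as instructions-per-cycle is derived straight from the
	 * evsels' aggregated counts.
	 */
	static double ipc(struct evsel *instructions, struct evsel *cycles,
			  int aggr_idx)
	{
		double insns = instructions->stats->aggr[aggr_idx].counts.val;
		double cycs  = cycles->stats->aggr[aggr_idx].counts.val;

		return cycs ? insns / cycs : 0.0;
	}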


@@ -157,10 +157,7 @@ typedef void (*print_metric_t)(struct perf_stat_config *config,
 			       const char *fmt, double val);
 typedef void (*new_line_t)(struct perf_stat_config *config, void *ctx);
 
-void perf_stat__init_shadow_stats(void);
 void perf_stat__reset_shadow_stats(void);
-void perf_stat__reset_shadow_per_stat(void);
-void perf_stat__update_shadow_stats(struct evsel *counter, u64 count, int aggr_idx);
 struct perf_stat_output_ctx {
 	void *ctx;
 	print_metric_t print_metric;
@@ -189,7 +186,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
 			      struct evsel *counter);
 void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *evlist);
 void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *evlist);
-void perf_stat_process_shadow_stats(struct perf_stat_config *config, struct evlist *evlist);
 
 struct perf_tool;
 union perf_event;