perf tool_pmu: Move expr literals to tool_pmu

Add the expr literals like "#smt_on" as tool events; this allows perf
stat to read and display their values. On my laptop with hyperthreading enabled:

```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true

 Performance counter stats for 'true':

                 0      has_pmem
                 8      num_cores
                16      num_cpus
                16      num_cpus_online
                 1      num_dies
                 1      num_packages
                 1      smt_on
     2,496,000,000      system_tsc_freq

       0.001113637 seconds time elapsed

       0.001218000 seconds user
       0.000000000 seconds sys
```

And with hyperthreading disabled:
```
$ perf stat -e "has_pmem,num_cores,num_cpus,num_cpus_online,num_dies,num_packages,smt_on,system_tsc_freq" true

 Performance counter stats for 'true':

                 0      has_pmem
                 8      num_cores
                16      num_cpus
                 8      num_cpus_online
                 1      num_dies
                 1      num_packages
                 0      smt_on
     2,496,000,000      system_tsc_freq

       0.000802115 seconds time elapsed

       0.000000000 seconds user
       0.000806000 seconds sys
```

As zero is a meaningful value for these events, should_skip_zero_counter
in stat-display now skips a zero tool-event value only when it does not
come from the first aggregation index.

For simplicity, the tool event implementations are reused by expr
literal evaluation rather than being evaluated as real events. Also,
core_wide isn't made a tool event because it requires command line
parameters.

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20241002032016.333748-8-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
This commit is contained in:
Ian Rogers 2024-10-01 20:20:10 -07:00 committed by Namhyung Kim
parent b8f1a1b068
commit 069057239a
10 changed files with 201 additions and 107 deletions

View File

@ -5,6 +5,7 @@
#include "../../../util/header.h"
#include "../../../util/pmu.h"
#include "../../../util/pmus.h"
#include "../../../util/tool_pmu.h"
#include <api/fs/fs.h>
#include <math.h>
@ -24,7 +25,7 @@ const struct pmu_metrics_table *pmu_metrics_table__find(void)
return NULL;
}
double perf_pmu__cpu_slots_per_cycle(void)
u64 tool_pmu__cpu_slots_per_cycle(void)
{
char path[PATH_MAX];
unsigned long long slots = 0;
@ -41,5 +42,5 @@ double perf_pmu__cpu_slots_per_cycle(void)
filename__read_ull(path, &slots);
}
return slots ? (double)slots : NAN;
return slots;
}

View File

@ -24,9 +24,9 @@ u64 rdtsc(void)
* ...
* will return 3000000000.
*/
static double cpuinfo_tsc_freq(void)
static u64 cpuinfo_tsc_freq(void)
{
double result = 0;
u64 result = 0;
FILE *cpuinfo;
char *line = NULL;
size_t len = 0;
@ -34,20 +34,22 @@ static double cpuinfo_tsc_freq(void)
cpuinfo = fopen("/proc/cpuinfo", "r");
if (!cpuinfo) {
pr_err("Failed to read /proc/cpuinfo for TSC frequency\n");
return NAN;
return 0;
}
while (getline(&line, &len, cpuinfo) > 0) {
if (!strncmp(line, "model name", 10)) {
char *pos = strstr(line + 11, " @ ");
double float_result;
if (pos && sscanf(pos, " @ %lfGHz", &result) == 1) {
result *= 1000000000;
if (pos && sscanf(pos, " @ %lfGHz", &float_result) == 1) {
float_result *= 1000000000;
result = (u64)float_result;
goto out;
}
}
}
out:
if (fpclassify(result) == FP_ZERO)
if (result == 0)
pr_err("Failed to find TSC frequency in /proc/cpuinfo\n");
free(line);
@ -55,7 +57,7 @@ static double cpuinfo_tsc_freq(void)
return result;
}
double arch_get_tsc_freq(void)
u64 arch_get_tsc_freq(void)
{
unsigned int a, b, c, d, lvl;
static bool cached;
@ -86,6 +88,6 @@ double arch_get_tsc_freq(void)
return tsc;
}
tsc = (double)c * (double)b / (double)a;
tsc = (u64)c * (u64)b / (u64)a;
return tsc;
}

View File

@ -5,25 +5,22 @@
#include <stdlib.h>
#include <string.h>
#include "metricgroup.h"
#include "cpumap.h"
#include "cputopo.h"
#include "debug.h"
#include "evlist.h"
#include "expr.h"
#include "pmu.h"
#include "smt.h"
#include "tool_pmu.h"
#include <util/expr-bison.h>
#include <util/expr-flex.h>
#include "util/hashmap.h"
#include "util/header.h"
#include "util/pmu.h"
#include "smt.h"
#include "tsc.h"
#include <api/fs/fs.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <ctype.h>
#include <math.h>
#include "pmu.h"
struct expr_id_data {
union {
@ -393,90 +390,26 @@ double expr_id_data__source_count(const struct expr_id_data *data)
return data->val.source_count;
}
#if !defined(__i386__) && !defined(__x86_64__)
double arch_get_tsc_freq(void)
{
return 0.0;
}
#endif
static double has_pmem(void)
{
static bool has_pmem, cached;
const char *sysfs = sysfs__mountpoint();
char path[PATH_MAX];
if (!cached) {
snprintf(path, sizeof(path), "%s/firmware/acpi/tables/NFIT", sysfs);
has_pmem = access(path, F_OK) == 0;
cached = true;
}
return has_pmem ? 1.0 : 0.0;
}
double expr__get_literal(const char *literal, const struct expr_scanner_ctx *ctx)
{
const struct cpu_topology *topology;
double result = NAN;
enum tool_pmu_event ev = tool_pmu__str_to_event(literal + 1);
if (!strcmp("#num_cpus", literal)) {
result = cpu__max_present_cpu().cpu;
goto out;
}
if (!strcmp("#num_cpus_online", literal)) {
struct perf_cpu_map *online = cpu_map__online();
if (ev != TOOL_PMU__EVENT_NONE) {
u64 count;
if (online)
result = perf_cpu_map__nr(online);
goto out;
}
if (tool_pmu__read_event(ev, &count))
result = count;
else
pr_err("Failure to read '%s'", literal);
if (!strcasecmp("#system_tsc_freq", literal)) {
result = arch_get_tsc_freq();
goto out;
}
/*
* Assume that topology strings are consistent, such as CPUs "0-1"
* wouldn't be listed as "0,1", and so after deduplication the number of
* these strings gives an indication of the number of packages, dies,
* etc.
*/
if (!strcasecmp("#smt_on", literal)) {
result = smt_on() ? 1.0 : 0.0;
goto out;
}
if (!strcmp("#core_wide", literal)) {
} else if (!strcmp("#core_wide", literal)) {
result = core_wide(ctx->system_wide, ctx->user_requested_cpu_list)
? 1.0 : 0.0;
goto out;
}
if (!strcmp("#num_packages", literal)) {
topology = online_topology();
result = topology->package_cpus_lists;
goto out;
}
if (!strcmp("#num_dies", literal)) {
topology = online_topology();
result = topology->die_cpus_lists;
goto out;
}
if (!strcmp("#num_cores", literal)) {
topology = online_topology();
result = topology->core_cpus_lists;
goto out;
}
if (!strcmp("#slots", literal)) {
result = perf_pmu__cpu_slots_per_cycle();
goto out;
}
if (!strcmp("#has_pmem", literal)) {
result = has_pmem();
goto out;
} else {
pr_err("Unrecognized literal '%s'", literal);
}
pr_err("Unrecognized literal '%s'", literal);
out:
pr_debug2("literal: %s = %f\n", literal, result);
return result;
}

View File

@ -2254,11 +2254,6 @@ bool perf_pmu__match(const struct perf_pmu *pmu, const char *tok)
(need_fnmatch && !fnmatch(tok, name, 0));
}
double __weak perf_pmu__cpu_slots_per_cycle(void)
{
return NAN;
}
int perf_pmu__event_source_devices_scnprintf(char *pathname, size_t size)
{
const char *sysfs = sysfs__mountpoint();

View File

@ -271,7 +271,6 @@ void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu);
bool perf_pmu__match(const struct perf_pmu *pmu, const char *tok);
double perf_pmu__cpu_slots_per_cycle(void);
int perf_pmu__event_source_devices_scnprintf(char *pathname, size_t size);
int perf_pmu__pathname_scnprintf(char *buf, size_t size,
const char *pmu_name, const char *filename);

View File

@ -987,8 +987,12 @@ static bool should_skip_zero_counter(struct perf_stat_config *config,
* Many tool events are only gathered on the first index, skip other
* zero values.
*/
if (evsel__is_tool(counter))
return true;
if (evsel__is_tool(counter)) {
struct aggr_cpu_id own_id =
config->aggr_get_id(config, (struct perf_cpu){ .cpu = 0 });
return !aggr_cpu_id__equal(id, &own_id);
}
/*
* Skip value 0 when it's an uncore event and the given aggr id

View File

@ -400,8 +400,17 @@ static int prepare_metric(const struct metric_expr *mexp,
case TOOL_PMU__EVENT_MAX:
pr_err("Invalid tool event 'max'");
abort();
case TOOL_PMU__EVENT_HAS_PMEM:
case TOOL_PMU__EVENT_NUM_CORES:
case TOOL_PMU__EVENT_NUM_CPUS:
case TOOL_PMU__EVENT_NUM_CPUS_ONLINE:
case TOOL_PMU__EVENT_NUM_DIES:
case TOOL_PMU__EVENT_NUM_PACKAGES:
case TOOL_PMU__EVENT_SLOTS:
case TOOL_PMU__EVENT_SMT_ON:
case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ:
default:
pr_err("Unknown tool event '%s'", evsel__name(metric_events[i]));
pr_err("Unexpected tool event '%s'", evsel__name(metric_events[i]));
abort();
}
val = avg_stats(stats) * scale;

View File

@ -1,11 +1,16 @@
// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup.h"
#include "counts.h"
#include "cputopo.h"
#include "evsel.h"
#include "pmu.h"
#include "print-events.h"
#include "smt.h"
#include "time-utils.h"
#include "tool_pmu.h"
#include "tsc.h"
#include <api/fs/fs.h>
#include <api/io.h>
#include <api/io.h>
#include <internal/threadmap.h>
#include <perf/threadmap.h>
@ -17,6 +22,15 @@ static const char *const tool_pmu__event_names[TOOL_PMU__EVENT_MAX] = {
"duration_time",
"user_time",
"system_time",
"has_pmem",
"num_cores",
"num_cpus",
"num_cpus_online",
"num_dies",
"num_packages",
"slots",
"smt_on",
"system_tsc_freq",
};
@ -33,8 +47,14 @@ enum tool_pmu_event tool_pmu__str_to_event(const char *str)
int i;
tool_pmu__for_each_event(i) {
if (!strcasecmp(str, tool_pmu__event_names[i]))
if (!strcasecmp(str, tool_pmu__event_names[i])) {
#if !defined(__aarch64__)
/* The slots event should only appear on arm64. */
if (i == TOOL_PMU__EVENT_SLOTS)
return TOOL_PMU__EVENT_NONE;
#endif
return i;
}
}
return TOOL_PMU__EVENT_NONE;
}
@ -250,6 +270,9 @@ int evsel__tool_pmu_open(struct evsel *evsel,
enum tool_pmu_event ev = evsel__tool_event(evsel);
int pid = -1, idx = 0, thread = 0, nthreads, err = 0, old_errno;
if (ev == TOOL_PMU__EVENT_NUM_CPUS)
return 0;
if (ev == TOOL_PMU__EVENT_DURATION_TIME) {
if (evsel->core.attr.sample_period) /* no sampling */
return -EINVAL;
@ -328,16 +351,133 @@ int evsel__tool_pmu_open(struct evsel *evsel,
return err;
}
#if !defined(__i386__) && !defined(__x86_64__)
u64 arch_get_tsc_freq(void)
{
return 0;
}
#endif
#if !defined(__aarch64__)
u64 tool_pmu__cpu_slots_per_cycle(void)
{
return 0;
}
#endif
static bool has_pmem(void)
{
static bool has_pmem, cached;
const char *sysfs = sysfs__mountpoint();
char path[PATH_MAX];
if (!cached) {
snprintf(path, sizeof(path), "%s/firmware/acpi/tables/NFIT", sysfs);
has_pmem = access(path, F_OK) == 0;
cached = true;
}
return has_pmem;
}
bool tool_pmu__read_event(enum tool_pmu_event ev, u64 *result)
{
const struct cpu_topology *topology;
switch (ev) {
case TOOL_PMU__EVENT_HAS_PMEM:
*result = has_pmem() ? 1 : 0;
return true;
case TOOL_PMU__EVENT_NUM_CORES:
topology = online_topology();
*result = topology->core_cpus_lists;
return true;
case TOOL_PMU__EVENT_NUM_CPUS:
*result = cpu__max_present_cpu().cpu;
return true;
case TOOL_PMU__EVENT_NUM_CPUS_ONLINE: {
struct perf_cpu_map *online = cpu_map__online();
if (online) {
*result = perf_cpu_map__nr(online);
return true;
}
return false;
}
case TOOL_PMU__EVENT_NUM_DIES:
topology = online_topology();
*result = topology->die_cpus_lists;
return true;
case TOOL_PMU__EVENT_NUM_PACKAGES:
topology = online_topology();
*result = topology->package_cpus_lists;
return true;
case TOOL_PMU__EVENT_SLOTS:
*result = tool_pmu__cpu_slots_per_cycle();
return *result ? true : false;
case TOOL_PMU__EVENT_SMT_ON:
*result = smt_on() ? 1 : 0;
return true;
case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ:
*result = arch_get_tsc_freq();
return true;
case TOOL_PMU__EVENT_NONE:
case TOOL_PMU__EVENT_DURATION_TIME:
case TOOL_PMU__EVENT_USER_TIME:
case TOOL_PMU__EVENT_SYSTEM_TIME:
case TOOL_PMU__EVENT_MAX:
default:
return false;
}
}
int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
{
__u64 *start_time, cur_time, delta_start;
unsigned long val;
int fd, err = 0;
struct perf_counts_values *count;
struct perf_counts_values *count, *old_count = NULL;
bool adjust = false;
enum tool_pmu_event ev = evsel__tool_event(evsel);
count = perf_counts(evsel->counts, cpu_map_idx, thread);
switch (evsel__tool_event(evsel)) {
switch (ev) {
case TOOL_PMU__EVENT_HAS_PMEM:
case TOOL_PMU__EVENT_NUM_CORES:
case TOOL_PMU__EVENT_NUM_CPUS:
case TOOL_PMU__EVENT_NUM_CPUS_ONLINE:
case TOOL_PMU__EVENT_NUM_DIES:
case TOOL_PMU__EVENT_NUM_PACKAGES:
case TOOL_PMU__EVENT_SLOTS:
case TOOL_PMU__EVENT_SMT_ON:
case TOOL_PMU__EVENT_SYSTEM_TSC_FREQ:
if (evsel->prev_raw_counts)
old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
val = 0;
if (cpu_map_idx == 0 && thread == 0) {
if (!tool_pmu__read_event(ev, &val)) {
count->lost++;
val = 0;
}
}
if (old_count) {
count->val = old_count->val + val;
count->run = old_count->run + 1;
count->ena = old_count->ena + 1;
} else {
count->val = val;
count->run++;
count->ena++;
}
return 0;
case TOOL_PMU__EVENT_DURATION_TIME:
/*
* Pretend duration_time is only on the first CPU and thread, or

View File

@ -10,9 +10,18 @@ struct print_callbacks;
enum tool_pmu_event {
TOOL_PMU__EVENT_NONE = 0,
TOOL_PMU__EVENT_DURATION_TIME = 1,
TOOL_PMU__EVENT_USER_TIME = 2,
TOOL_PMU__EVENT_SYSTEM_TIME = 3,
TOOL_PMU__EVENT_DURATION_TIME,
TOOL_PMU__EVENT_USER_TIME,
TOOL_PMU__EVENT_SYSTEM_TIME,
TOOL_PMU__EVENT_HAS_PMEM,
TOOL_PMU__EVENT_NUM_CORES,
TOOL_PMU__EVENT_NUM_CPUS,
TOOL_PMU__EVENT_NUM_CPUS_ONLINE,
TOOL_PMU__EVENT_NUM_DIES,
TOOL_PMU__EVENT_NUM_PACKAGES,
TOOL_PMU__EVENT_SLOTS,
TOOL_PMU__EVENT_SMT_ON,
TOOL_PMU__EVENT_SYSTEM_TSC_FREQ,
TOOL_PMU__EVENT_MAX,
};
@ -31,10 +40,12 @@ int tool_pmu__config_terms(struct perf_event_attr *attr,
struct parse_events_terms *terms,
struct parse_events_error *err);
int tool_pmu__for_each_event_cb(struct perf_pmu *pmu, void *state, pmu_event_callback cb);
bool tool_pmu__read_event(enum tool_pmu_event ev, u64 *result);
u64 tool_pmu__cpu_slots_per_cycle(void);
bool perf_pmu__is_tool(const struct perf_pmu *pmu);
bool evsel__is_tool(const struct evsel *evsel);
enum tool_pmu_event evsel__tool_event(const struct evsel *evsel);
const char *evsel__tool_pmu_event_name(const struct evsel *evsel);

View File

@ -25,7 +25,7 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
u64 rdtsc(void);
double arch_get_tsc_freq(void);
u64 arch_get_tsc_freq(void);
size_t perf_event__fprintf_time_conv(union perf_event *event, FILE *fp);