libperf cpumap: Replace usage of perf_cpu_map__new(NULL) with perf_cpu_map__new_online_cpus()
Passing NULL to perf_cpu_map__new() performs perf_cpu_map__new_online_cpus(), so call perf_cpu_map__new_online_cpus() directly to make the intent clearer.

Reviewed-by: James Clark <james.clark@arm.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexandre Ghiti <alexghiti@rivosinc.com>
Cc: Andrew Jones <ajones@ventanamicro.com>
Cc: André Almeida <andrealmeid@igalia.com>
Cc: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
Cc: Atish Patra <atishp@rivosinc.com>
Cc: Changbin Du <changbin.du@huawei.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paran Lee <p4ranlee@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Steinar H. Gunderson <sesse@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Jihong <yangjihong1@huawei.com>
Cc: Yang Li <yang.lee@linux.alibaba.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Cc: bpf@vger.kernel.org
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20231129060211.1890454-5-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 923ca62a7b
commit effe957c6b
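For reference, here is a minimal, self-contained sketch (not part of this patch) of the renamed constructor from a libperf user's point of view; it assumes a libperf that already provides perf_cpu_map__new_online_cpus() and is linked with -lperf:

/* Build the map of currently online CPUs and print them.
 * Behaviour matches the old perf_cpu_map__new(NULL) call; only the
 * function name is different.
 */
#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
        struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
        struct perf_cpu cpu;
        int idx;

        if (!cpus) {
                fprintf(stderr, "failed to create cpus\n");
                return -1;
        }

        /* Iterate every CPU in the map. */
        perf_cpu_map__for_each_cpu(cpu, idx, cpus)
                printf("online cpu: %d\n", cpu.cpu);

        perf_cpu_map__put(cpus);
        return 0;
}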
@@ -39,7 +39,7 @@ int main(int argc, char **argv)
 
         libperf_init(libperf_print);
 
-        cpus = perf_cpu_map__new(NULL);
+        cpus = perf_cpu_map__new_online_cpus();
         if (!cpus) {
                 fprintf(stderr, "failed to create cpus\n");
                 return -1;
@@ -97,7 +97,7 @@ In this case we will monitor all the available CPUs:
 
 [source,c]
 --
- 42         cpus = perf_cpu_map__new(NULL);
+ 42         cpus = perf_cpu_map__new_online_cpus();
  43         if (!cpus) {
  44                 fprintf(stderr, "failed to create cpus\n");
  45                 return -1;
@@ -39,7 +39,7 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
         if (evsel->system_wide) {
                 /* System wide: set the cpu map of the evsel to all online CPUs. */
                 perf_cpu_map__put(evsel->cpus);
-                evsel->cpus = perf_cpu_map__new(NULL);
+                evsel->cpus = perf_cpu_map__new_online_cpus();
         } else if (evlist->has_user_cpus && evsel->is_pmu_core) {
                 /*
                  * User requested CPUs on a core PMU, ensure the requested CPUs
@@ -46,7 +46,7 @@ static int test_stat_cpu(void)
         };
         int err, idx;
 
-        cpus = perf_cpu_map__new(NULL);
+        cpus = perf_cpu_map__new_online_cpus();
         __T("failed to create cpus", cpus);
 
         evlist = perf_evlist__new();
@@ -350,7 +350,7 @@ static int test_mmap_cpus(void)
 
         attr.config = id;
 
-        cpus = perf_cpu_map__new(NULL);
+        cpus = perf_cpu_map__new_online_cpus();
         __T("failed to create cpus", cpus);
 
         evlist = perf_evlist__new();
@@ -27,7 +27,7 @@ static int test_stat_cpu(void)
         };
         int err, idx;
 
-        cpus = perf_cpu_map__new(NULL);
+        cpus = perf_cpu_map__new_online_cpus();
         __T("failed to create cpus", cpus);
 
         evsel = perf_evsel__new(&attr);
@@ -199,7 +199,7 @@ static int cs_etm_validate_config(struct auxtrace_record *itr,
 {
         int i, err = -EINVAL;
         struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
-        struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
+        struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
 
         /* Set option of each CPU we have */
         for (i = 0; i < cpu__max_cpu().cpu; i++) {
@@ -536,7 +536,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
         int i;
         int etmv3 = 0, etmv4 = 0, ete = 0;
         struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
-        struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
+        struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
 
         /* cpu map is not empty, we have specific CPUs to work with */
         if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) {
@@ -802,7 +802,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
         u64 nr_cpu, type;
         struct perf_cpu_map *cpu_map;
         struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus;
-        struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
+        struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
         struct cs_etm_recording *ptr =
                         container_of(itr, struct cs_etm_recording, itr);
         struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
@@ -57,7 +57,7 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus)
 
 int get_cpuid(char *buf, size_t sz)
 {
-        struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
+        struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
         int ret;
 
         if (!cpus)
@@ -330,7 +330,7 @@ int bench_epoll_ctl(int argc, const char **argv)
         act.sa_sigaction = toggle_done;
         sigaction(SIGINT, &act, NULL);
 
-        cpu = perf_cpu_map__new(NULL);
+        cpu = perf_cpu_map__new_online_cpus();
         if (!cpu)
                 goto errmem;
 
@@ -444,7 +444,7 @@ int bench_epoll_wait(int argc, const char **argv)
         act.sa_sigaction = toggle_done;
         sigaction(SIGINT, &act, NULL);
 
-        cpu = perf_cpu_map__new(NULL);
+        cpu = perf_cpu_map__new_online_cpus();
         if (!cpu)
                 goto errmem;
 
@@ -138,7 +138,7 @@ int bench_futex_hash(int argc, const char **argv)
                 exit(EXIT_FAILURE);
         }
 
-        cpu = perf_cpu_map__new(NULL);
+        cpu = perf_cpu_map__new_online_cpus();
         if (!cpu)
                 goto errmem;
 
@@ -172,7 +172,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
         if (argc)
                 goto err;
 
-        cpu = perf_cpu_map__new(NULL);
+        cpu = perf_cpu_map__new_online_cpus();
         if (!cpu)
                 err(EXIT_FAILURE, "calloc");
 
@@ -174,7 +174,7 @@ int bench_futex_requeue(int argc, const char **argv)
         if (argc)
                 goto err;
 
-        cpu = perf_cpu_map__new(NULL);
+        cpu = perf_cpu_map__new_online_cpus();
         if (!cpu)
                 err(EXIT_FAILURE, "cpu_map__new");
 
@@ -264,7 +264,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
                 err(EXIT_FAILURE, "mlockall");
         }
 
-        cpu = perf_cpu_map__new(NULL);
+        cpu = perf_cpu_map__new_online_cpus();
         if (!cpu)
                 err(EXIT_FAILURE, "calloc");
 
@@ -149,7 +149,7 @@ int bench_futex_wake(int argc, const char **argv)
                 exit(EXIT_FAILURE);
         }
 
-        cpu = perf_cpu_map__new(NULL);
+        cpu = perf_cpu_map__new_online_cpus();
         if (!cpu)
                 err(EXIT_FAILURE, "calloc");
 
@@ -333,7 +333,7 @@ static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
 
 static int reset_tracing_cpu(void)
 {
-        struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
+        struct perf_cpu_map *cpumap = perf_cpu_map__new_online_cpus();
         int ret;
 
         ret = set_tracing_cpumask(cpumap);
@@ -610,7 +610,7 @@ static int do_test_code_reading(bool try_kcore)
                 goto out_put;
         }
 
-        cpus = perf_cpu_map__new(NULL);
+        cpus = perf_cpu_map__new_online_cpus();
         if (!cpus) {
                 pr_debug("perf_cpu_map__new failed\n");
                 goto out_put;
@@ -81,7 +81,7 @@ static int test__keep_tracking(struct test_suite *test __maybe_unused, int subte
         threads = thread_map__new(-1, getpid(), UINT_MAX);
         CHECK_NOT_NULL__(threads);
 
-        cpus = perf_cpu_map__new(NULL);
+        cpus = perf_cpu_map__new_online_cpus();
         CHECK_NOT_NULL__(cpus);
 
         evlist = evlist__new();
@@ -52,7 +52,7 @@ static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest
                 return -1;
         }
 
-        cpus = perf_cpu_map__new(NULL);
+        cpus = perf_cpu_map__new_online_cpus();
         if (cpus == NULL) {
                 pr_debug("perf_cpu_map__new\n");
                 goto out_free_threads;
@@ -37,7 +37,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb
                 return -1;
         }
 
-        cpus = perf_cpu_map__new(NULL);
+        cpus = perf_cpu_map__new_online_cpus();
         if (cpus == NULL) {
                 pr_debug("perf_cpu_map__new\n");
                 goto out_thread_map_delete;
@@ -93,7 +93,7 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
         threads = thread_map__new(-1, getpid(), UINT_MAX);
         CHECK_NOT_NULL__(threads);
 
-        cpus = perf_cpu_map__new(NULL);
+        cpus = perf_cpu_map__new_online_cpus();
         CHECK_NOT_NULL__(cpus);
 
         evlist = evlist__new();
@@ -351,7 +351,7 @@ static int test__switch_tracking(struct test_suite *test __maybe_unused, int sub
                 goto out_err;
         }
 
-        cpus = perf_cpu_map__new(NULL);
+        cpus = perf_cpu_map__new_online_cpus();
         if (!cpus) {
                 pr_debug("perf_cpu_map__new failed!\n");
                 goto out_err;
@@ -215,7 +215,7 @@ static int test__session_topology(struct test_suite *test __maybe_unused, int su
         if (session_write_header(path))
                 goto free_path;
 
-        map = perf_cpu_map__new(NULL);
+        map = perf_cpu_map__new_online_cpus();
         if (map == NULL) {
                 pr_debug("failed to get system cpumap\n");
                 goto free_path;
@@ -455,7 +455,7 @@ static int bperf__load(struct evsel *evsel, struct target *target)
                 return -1;
 
         if (!all_cpu_map) {
-                all_cpu_map = perf_cpu_map__new(NULL);
+                all_cpu_map = perf_cpu_map__new_online_cpus();
                 if (!all_cpu_map)
                         return -1;
         }
@@ -672,7 +672,7 @@ struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
         static struct perf_cpu_map *online;
 
         if (!online)
-                online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */
+                online = perf_cpu_map__new_online_cpus(); /* from /sys/devices/system/cpu/online */
 
         return online;
 }
@@ -267,7 +267,7 @@ struct cpu_topology *cpu_topology__new(void)
         ncpus = cpu__max_present_cpu().cpu;
 
         /* build online CPU map */
-        map = perf_cpu_map__new(NULL);
+        map = perf_cpu_map__new_online_cpus();
         if (map == NULL) {
                 pr_debug("failed to get system cpumap\n");
                 return NULL;
@@ -1352,7 +1352,7 @@ static int evlist__create_syswide_maps(struct evlist *evlist)
          * error, and we may not want to do that fallback to a
          * default cpu identity map :-\
          */
-        cpus = perf_cpu_map__new(NULL);
+        cpus = perf_cpu_map__new_online_cpus();
         if (!cpus)
                 goto out;
 
@@ -64,7 +64,7 @@ static bool perf_probe_api(setup_probe_fn_t fn)
         struct perf_cpu cpu;
         int ret, i = 0;
 
-        cpus = perf_cpu_map__new(NULL);
+        cpus = perf_cpu_map__new_online_cpus();
         if (!cpus)
                 return false;
         cpu = perf_cpu_map__cpu(cpus, 0);
@@ -140,7 +140,7 @@ bool perf_can_record_cpu_wide(void)
         struct perf_cpu cpu;
         int fd;
 
-        cpus = perf_cpu_map__new(NULL);
+        cpus = perf_cpu_map__new_online_cpus();
         if (!cpus)
                 return false;
 
@@ -238,7 +238,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
         evsel = evlist__last(temp_evlist);
 
         if (!evlist || perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) {
-                struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
+                struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
 
                 if (cpus)
                         cpu = perf_cpu_map__cpu(cpus, 0);