perf tools fixes for v5.18: 1st batch

Merge tag 'perf-tools-fixes-for-v5.18-2022-04-09' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux

Pull perf tools fixes from Arnaldo Carvalho de Melo:

 - Fix the clang command line option probing and remove some options to
   filter out, fixing the build with the latest clang versions

 - Fix 'perf bench' futex and epoll benchmarks to deal with machines with
   more than 1K CPUs

 - Fix 'perf test tsc' error message when not supported

 - Remap perf ring buffer if there is no space for event, fixing perf
   usage in 32-bit ChromeOS

 - Drop objdump stderr to avoid getting stuck waiting for stdout output
   in 'perf annotate'

 - Fix up garbled output by not showing unwind error messages when
   augmenting frame in best effort mode

 - Fix perf's libperf_print callback by using veprintf(), the va_list
   variant of eprintf()

 - Sync vhost and arm64 cputype headers with the kernel sources

 - Fix 'perf report --mem-mode' with ARM SPE

 - Add missing external commands ('perf iostat', etc) to 'perf --list-cmds'

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

* tag 'perf-tools-fixes-for-v5.18-2022-04-09' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux:
  perf annotate: Drop objdump stderr to avoid getting stuck waiting for stdout output
  perf tools: Add external commands to list-cmds
  perf docs: Add perf-iostat link to manpages
  perf session: Remap buf if there is no space for event
  perf bench: Fix epoll bench to correct usage of affinity for machines with #CPUs > 1K
  perf bench: Fix futex bench to correct usage of affinity for machines with #CPUs > 1K
  perf tools: Fix perf's libperf_print callback
  perf: arm-spe: Fix perf report --mem-mode
  perf unwind: Don't show unwind error messages when augmenting frame pointer stack
  tools headers arm64: Sync arm64's cputype.h with the kernel sources
  perf test tsc: Fix error message when not supported
  perf build: Don't use -ffat-lto-objects in the python feature test when building with clang-13
  perf python: Fix probing for some clang command line options
  tools build: Filter out options and warnings not supported by clang
  tools build: Use $(shell ) instead of `` to get embedded libperl's ccopts
  tools include UAPI: Sync linux/vhost.h with the kernel sources
commit 1862a69c91
@@ -75,6 +75,7 @@
 #define ARM_CPU_PART_CORTEX_A77 0xD0D
 #define ARM_CPU_PART_NEOVERSE_V1 0xD40
 #define ARM_CPU_PART_CORTEX_A78 0xD41
+#define ARM_CPU_PART_CORTEX_A78AE 0xD42
 #define ARM_CPU_PART_CORTEX_X1 0xD44
 #define ARM_CPU_PART_CORTEX_A510 0xD46
 #define ARM_CPU_PART_CORTEX_A710 0xD47
@@ -130,6 +131,7 @@
 #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
 #define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
 #define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
 #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
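For context on how these part numbers are consumed, here is a minimal sketch of how MIDR_CPU_MODEL() composes a full MIDR value for the new Cortex-A78AE entry. The shift constants are restated from the kernel's cputype.h as assumptions so the sketch is self-contained.

#include <stdint.h>
#include <stdio.h>

/* Field layout restated from the kernel header (assumption). */
#define MIDR_PARTNUM_SHIFT      4
#define MIDR_ARCHITECTURE_SHIFT 16
#define MIDR_IMPLEMENTOR_SHIFT  24

#define MIDR_CPU_MODEL(imp, partnum)            \
    (((imp) << MIDR_IMPLEMENTOR_SHIFT) |        \
     (0xf << MIDR_ARCHITECTURE_SHIFT)  |        \
     ((partnum) << MIDR_PARTNUM_SHIFT))

#define ARM_CPU_IMP_ARM           0x41  /* implementer 'A' for Arm */
#define ARM_CPU_PART_CORTEX_A78AE 0xD42 /* the value added above */

int main(void)
{
    uint32_t midr = MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE);

    printf("MIDR_CORTEX_A78AE = %#x\n", midr); /* prints 0x410fd420 */
    return 0;
}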
@@ -217,9 +217,16 @@ strip-libs = $(filter-out -l%,$(1))
 PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
 PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
 PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
-PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
 FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
 
+ifeq ($(CC_NO_CLANG), 0)
+  PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
+  PERL_EMBED_CCOPTS := $(filter-out -flto=auto -ffat-lto-objects, $(PERL_EMBED_CCOPTS))
+  PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
+  FLAGS_PERL_EMBED += -Wno-compound-token-split-by-macro
+endif
+
 $(OUTPUT)test-libperl.bin:
 	$(BUILD) $(FLAGS_PERL_EMBED)
 
@@ -150,4 +150,11 @@
 /* Get the valid iova range */
 #define VHOST_VDPA_GET_IOVA_RANGE	_IOR(VHOST_VIRTIO, 0x78, \
 					     struct vhost_vdpa_iova_range)
+
+/* Get the config size */
+#define VHOST_VDPA_GET_CONFIG_SIZE	_IOR(VHOST_VIRTIO, 0x79, __u32)
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT	_IOR(VHOST_VIRTIO, 0x80, __u32)
+
 #endif
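A hedged usage sketch for the two new ioctls on a vhost-vdpa file descriptor; the /dev/vhost-vdpa-0 device path is an assumption for illustration, not something this patch defines.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

int main(void)
{
    __u32 config_size = 0, vqs_count = 0;
    /* Device node path is an assumption for illustration. */
    int fd = open("/dev/vhost-vdpa-0", O_RDWR);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (ioctl(fd, VHOST_VDPA_GET_CONFIG_SIZE, &config_size) == 0)
        printf("config size: %u bytes\n", config_size);
    if (ioctl(fd, VHOST_VDPA_GET_VQS_COUNT, &vqs_count) == 0)
        printf("virtqueue count: %u\n", vqs_count);
    close(fd);
    return 0;
}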
@@ -83,7 +83,7 @@ linkperf:perf-buildid-list[1], linkperf:perf-c2c[1],
 linkperf:perf-config[1], linkperf:perf-data[1], linkperf:perf-diff[1],
 linkperf:perf-evlist[1], linkperf:perf-ftrace[1],
 linkperf:perf-help[1], linkperf:perf-inject[1],
-linkperf:perf-intel-pt[1], linkperf:perf-kallsyms[1],
+linkperf:perf-intel-pt[1], linkperf:perf-iostat[1], linkperf:perf-kallsyms[1],
 linkperf:perf-kmem[1], linkperf:perf-kvm[1], linkperf:perf-lock[1],
 linkperf:perf-mem[1], linkperf:perf-probe[1], linkperf:perf-sched[1],
 linkperf:perf-script[1], linkperf:perf-test[1],
@@ -272,6 +272,9 @@ ifdef PYTHON_CONFIG
   PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
   PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
   FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
+  ifeq ($(CC_NO_CLANG), 0)
+    PYTHON_EMBED_CCOPTS := $(filter-out -ffat-lto-objects, $(PYTHON_EMBED_CCOPTS))
+  endif
 endif
 
 FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
@@ -790,6 +793,9 @@ else
       LDFLAGS += $(PERL_EMBED_LDFLAGS)
       EXTLIBS += $(PERL_EMBED_LIBADD)
       CFLAGS += -DHAVE_LIBPERL_SUPPORT
+      ifeq ($(CC_NO_CLANG), 0)
+        CFLAGS += -Wno-compound-token-split-by-macro
+      endif
       $(call detected,CONFIG_LIBPERL)
     endif
   endif
@@ -239,6 +239,12 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
 		arm_spe_set_timestamp(itr, arm_spe_evsel);
 	}
 
+	/*
+	 * Set this only so that perf report knows that SPE generates memory info. It has no effect
+	 * on the opening of the event or the SPE data produced.
+	 */
+	evsel__set_sample_bit(arm_spe_evsel, DATA_SRC);
+
 	/* Add dummy event to keep tracking */
 	err = parse_events(evlist, "dummy:u", NULL);
 	if (err)
@@ -222,13 +222,20 @@ static void init_fdmaps(struct worker *w, int pct)
 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 {
 	pthread_attr_t thread_attr, *attrp = NULL;
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	unsigned int i, j;
 	int ret = 0;
+	int nrcpus;
+	size_t size;
 
 	if (!noaffinity)
 		pthread_attr_init(&thread_attr);
 
+	nrcpus = perf_cpu_map__nr(cpu);
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	for (i = 0; i < nthreads; i++) {
 		struct worker *w = &worker[i];
 
@@ -252,22 +259,28 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 			init_fdmaps(w, 50);
 
 		if (!noaffinity) {
-			CPU_ZERO(&cpuset);
-			CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+			CPU_ZERO_S(size, cpuset);
+			CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
+					size, cpuset);
 
-			ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-			if (ret)
+			ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+			if (ret) {
+				CPU_FREE(cpuset);
 				err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+			}
 
 			attrp = &thread_attr;
 		}
 
 		ret = pthread_create(&w->thread, attrp, workerfn,
 				     (void *)(struct worker *) w);
-		if (ret)
+		if (ret) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
 
+	CPU_FREE(cpuset);
 	if (!noaffinity)
 		pthread_attr_destroy(&thread_attr);
 
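The same CPU_ALLOC()/CPU_SET_S() pattern repeats in the epoll and futex benchmarks below. A minimal standalone sketch of the idea: a statically sized cpu_set_t only describes 1024 CPUs, so the fix allocates the set dynamically and passes its real size to pthread_attr_setaffinity_np(). Names follow glibc; sysconf() stands in here for perf_cpu_map__nr().

#define _GNU_SOURCE
#include <err.h>
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>
#include <unistd.h>

static void *workerfn(void *arg) { return arg; }

int main(void)
{
    /* sysconf() stands in for perf_cpu_map__nr() in this sketch. */
    int nrcpus = (int)sysconf(_SC_NPROCESSORS_CONF);
    cpu_set_t *cpuset = CPU_ALLOC(nrcpus);  /* sized for nrcpus, not fixed at 1024 */
    size_t size = CPU_ALLOC_SIZE(nrcpus);
    pthread_attr_t attr;
    pthread_t tid;

    if (!cpuset)
        err(EXIT_FAILURE, "CPU_ALLOC");
    pthread_attr_init(&attr);

    CPU_ZERO_S(size, cpuset);
    CPU_SET_S(0, size, cpuset);             /* pin to CPU 0 for the demo */

    /* Pass the allocated size; sizeof(cpu_set_t) only covers 1024 CPUs. */
    if (pthread_attr_setaffinity_np(&attr, size, cpuset))
        err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
    if (pthread_create(&tid, &attr, workerfn, NULL))
        err(EXIT_FAILURE, "pthread_create");

    pthread_join(tid, NULL);
    pthread_attr_destroy(&attr);
    CPU_FREE(cpuset);
    return 0;
}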
@@ -291,9 +291,11 @@ static void print_summary(void)
 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 {
 	pthread_attr_t thread_attr, *attrp = NULL;
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	unsigned int i, j;
 	int ret = 0, events = EPOLLIN;
+	int nrcpus;
+	size_t size;
 
 	if (oneshot)
 		events |= EPOLLONESHOT;
@@ -306,6 +308,11 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 	if (!noaffinity)
 		pthread_attr_init(&thread_attr);
 
+	nrcpus = perf_cpu_map__nr(cpu);
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	for (i = 0; i < nthreads; i++) {
 		struct worker *w = &worker[i];
 
@@ -341,22 +348,28 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 		}
 
 		if (!noaffinity) {
-			CPU_ZERO(&cpuset);
-			CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+			CPU_ZERO_S(size, cpuset);
+			CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
+					size, cpuset);
 
-			ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-			if (ret)
+			ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+			if (ret) {
+				CPU_FREE(cpuset);
 				err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+			}
 
 			attrp = &thread_attr;
 		}
 
 		ret = pthread_create(&w->thread, attrp, workerfn,
 				     (void *)(struct worker *) w);
-		if (ret)
+		if (ret) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
 
+	CPU_FREE(cpuset);
 	if (!noaffinity)
 		pthread_attr_destroy(&thread_attr);
 
@@ -122,12 +122,14 @@ static void print_summary(void)
 int bench_futex_hash(int argc, const char **argv)
 {
 	int ret = 0;
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	struct sigaction act;
 	unsigned int i;
 	pthread_attr_t thread_attr;
 	struct worker *worker = NULL;
 	struct perf_cpu_map *cpu;
+	int nrcpus;
+	size_t size;
 
 	argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
 	if (argc) {
@@ -170,25 +172,35 @@ int bench_futex_hash(int argc, const char **argv)
 	threads_starting = params.nthreads;
 	pthread_attr_init(&thread_attr);
 	gettimeofday(&bench__start, NULL);
 
+	nrcpus = perf_cpu_map__nr(cpu);
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	for (i = 0; i < params.nthreads; i++) {
 		worker[i].tid = i;
 		worker[i].futex = calloc(params.nfutexes, sizeof(*worker[i].futex));
 		if (!worker[i].futex)
 			goto errmem;
 
-		CPU_ZERO(&cpuset);
-		CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
-
-		ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-		if (ret)
+		CPU_ZERO_S(size, cpuset);
+		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
+
+		ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+		if (ret) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
-
+		}
 		ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
 				     (void *)(struct worker *) &worker[i]);
-		if (ret)
+		if (ret) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 
 	}
+	CPU_FREE(cpuset);
 	pthread_attr_destroy(&thread_attr);
 
 	pthread_mutex_lock(&thread_lock);
@@ -120,11 +120,17 @@ static void *workerfn(void *arg)
 static void create_threads(struct worker *w, pthread_attr_t thread_attr,
 			   struct perf_cpu_map *cpu)
 {
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	unsigned int i;
+	int nrcpus = perf_cpu_map__nr(cpu);
+	size_t size;
 
 	threads_starting = params.nthreads;
 
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	for (i = 0; i < params.nthreads; i++) {
 		worker[i].tid = i;
 
@@ -135,15 +141,20 @@ static void create_threads(struct worker *w, pthread_attr_t thread_attr,
 		} else
 			worker[i].futex = &global_futex;
 
-		CPU_ZERO(&cpuset);
-		CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+		CPU_ZERO_S(size, cpuset);
+		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-		if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+		if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+		}
 
-		if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))
+		if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i])) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
+	CPU_FREE(cpuset);
 }
 
 int bench_futex_lock_pi(int argc, const char **argv)
@@ -123,22 +123,33 @@ static void *workerfn(void *arg __maybe_unused)
 static void block_threads(pthread_t *w,
 			  pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
 {
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	unsigned int i;
+	int nrcpus = perf_cpu_map__nr(cpu);
+	size_t size;
 
 	threads_starting = params.nthreads;
 
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	/* create and block all threads */
 	for (i = 0; i < params.nthreads; i++) {
-		CPU_ZERO(&cpuset);
-		CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+		CPU_ZERO_S(size, cpuset);
+		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-		if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+		if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+		}
 
-		if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+		if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
+	CPU_FREE(cpuset);
 }
 
 static void toggle_done(int sig __maybe_unused,
@@ -144,22 +144,33 @@ static void *blocked_workerfn(void *arg __maybe_unused)
 static void block_threads(pthread_t *w, pthread_attr_t thread_attr,
 			  struct perf_cpu_map *cpu)
 {
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	unsigned int i;
+	int nrcpus = perf_cpu_map__nr(cpu);
+	size_t size;
 
 	threads_starting = params.nthreads;
 
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	/* create and block all threads */
 	for (i = 0; i < params.nthreads; i++) {
-		CPU_ZERO(&cpuset);
-		CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+		CPU_ZERO_S(size, cpuset);
+		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-		if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+		if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+		}
 
-		if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL))
+		if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
+	CPU_FREE(cpuset);
 }
 
 static void print_run(struct thread_data *waking_worker, unsigned int run_num)
@@ -97,22 +97,32 @@ static void print_summary(void)
 static void block_threads(pthread_t *w,
 			  pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
 {
-	cpu_set_t cpuset;
+	cpu_set_t *cpuset;
 	unsigned int i;
 
+	size_t size;
+	int nrcpus = perf_cpu_map__nr(cpu);
 	threads_starting = params.nthreads;
 
+	cpuset = CPU_ALLOC(nrcpus);
+	BUG_ON(!cpuset);
+	size = CPU_ALLOC_SIZE(nrcpus);
+
 	/* create and block all threads */
 	for (i = 0; i < params.nthreads; i++) {
-		CPU_ZERO(&cpuset);
-		CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+		CPU_ZERO_S(size, cpuset);
+		CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-		if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+		if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+		}
 
-		if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+		if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
+			CPU_FREE(cpuset);
 			err(EXIT_FAILURE, "pthread_create");
+		}
 	}
+	CPU_FREE(cpuset);
 }
 
 static void toggle_done(int sig __maybe_unused,
@@ -55,6 +55,7 @@ struct cmd_struct {
 };
 
 static struct cmd_struct commands[] = {
+	{ "archive",	NULL,	0 },
 	{ "buildid-cache", cmd_buildid_cache, 0 },
 	{ "buildid-list", cmd_buildid_list, 0 },
 	{ "config",	cmd_config,	0 },
@@ -62,6 +63,7 @@ static struct cmd_struct commands[] = {
 	{ "diff",	cmd_diff,	0 },
 	{ "evlist",	cmd_evlist,	0 },
 	{ "help",	cmd_help,	0 },
+	{ "iostat",	NULL,	0 },
 	{ "kallsyms",	cmd_kallsyms,	0 },
 	{ "list",	cmd_list,	0 },
 	{ "record",	cmd_record,	0 },
@@ -360,6 +362,8 @@ static void handle_internal_command(int argc, const char **argv)
 
 	for (i = 0; i < ARRAY_SIZE(commands); i++) {
 		struct cmd_struct *p = commands+i;
+		if (p->fn == NULL)
+			continue;
 		if (strcmp(p->cmd, cmd))
 			continue;
 		exit(run_builtin(p, argc, argv));
@@ -434,7 +438,7 @@ void pthread__unblock_sigwinch(void)
 static int libperf_print(enum libperf_print_level level,
 			 const char *fmt, va_list ap)
 {
-	return eprintf(level, verbose, fmt, ap);
+	return veprintf(level, verbose, fmt, ap);
 }
 
 int main(int argc, const char **argv)
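The libperf_print fix is the classic va_list bug: a received va_list must be forwarded to a v-variant (veprintf() here), never passed to the variadic function itself. A minimal sketch, with hypothetical vlog()/log_cb()/emit() helpers mirroring the shape of the perf code:

#include <stdarg.h>
#include <stdio.h>

/* v-variant backend, like veprintf(): consumes an explicit va_list. */
static int vlog(const char *fmt, va_list ap)
{
    return vfprintf(stderr, fmt, ap);
}

/* Callback receiving a va_list, like libperf_print(). */
static int log_cb(const char *fmt, va_list ap)
{
    /* Wrong: a plain fprintf(stderr, fmt, ap) would treat 'ap' itself as
     * the first variadic argument, producing garbage output. */
    return vlog(fmt, ap);
}

static int emit(const char *fmt, ...)
{
    va_list ap;
    int ret;

    va_start(ap, fmt);
    ret = log_cb(fmt, ap);
    va_end(ap);
    return ret;
}

int main(void)
{
    emit("hello %s %d\n", "world", 42);
    return 0;
}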
@@ -122,7 +122,7 @@ NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thr
 	}
 
 	err = unwind__get_entries(unwind_entry, &cnt, thread,
-				  &sample, MAX_STACK);
+				  &sample, MAX_STACK, false);
 	if (err)
 		pr_debug("unwind failed\n");
 	else if (cnt != MAX_STACK) {
@@ -47,6 +47,17 @@
 	}					\
 }
 
+static int test__tsc_is_supported(struct test_suite *test __maybe_unused,
+				  int subtest __maybe_unused)
+{
+	if (!TSC_IS_SUPPORTED) {
+		pr_debug("Test not supported on this architecture\n");
+		return TEST_SKIP;
+	}
+
+	return TEST_OK;
+}
+
 /**
  * test__perf_time_to_tsc - test converting perf time to TSC.
  *
@@ -70,7 +81,7 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
 	struct perf_cpu_map *cpus = NULL;
 	struct evlist *evlist = NULL;
 	struct evsel *evsel = NULL;
-	int err = -1, ret, i;
+	int err = TEST_FAIL, ret, i;
 	const char *comm1, *comm2;
 	struct perf_tsc_conversion tc;
 	struct perf_event_mmap_page *pc;
@@ -79,10 +90,6 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
 	u64 test_time, comm1_time = 0, comm2_time = 0;
 	struct mmap *md;
 
-	if (!TSC_IS_SUPPORTED) {
-		pr_debug("Test not supported on this architecture");
-		return TEST_SKIP;
-	}
-
 	threads = thread_map__new(-1, getpid(), UINT_MAX);
 	CHECK_NOT_NULL__(threads);
@@ -124,8 +131,8 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
 	ret = perf_read_tsc_conversion(pc, &tc);
 	if (ret) {
 		if (ret == -EOPNOTSUPP) {
-			fprintf(stderr, " (not supported)");
-			return 0;
+			pr_debug("perf_read_tsc_conversion is not supported in current kernel\n");
+			err = TEST_SKIP;
 		}
 		goto out_err;
 	}
@@ -191,7 +198,7 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
 		test_tsc >= comm2_tsc)
 		goto out_err;
 
-	err = 0;
+	err = TEST_OK;
 
 out_err:
 	evlist__delete(evlist);
@@ -200,4 +207,15 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
 	return err;
 }
 
-DEFINE_SUITE("Convert perf time to TSC", perf_time_to_tsc);
+static struct test_case time_to_tsc_tests[] = {
+	TEST_CASE_REASON("TSC support", tsc_is_supported,
+			 "This architecture does not support"),
+	TEST_CASE_REASON("Perf time to TSC", perf_time_to_tsc,
+			 "perf_read_tsc_conversion is not supported"),
+	{ .name = NULL, }
+};
+
+struct test_suite suite__perf_time_to_tsc = {
+	.desc = "Convert perf time to TSC",
+	.test_cases = time_to_tsc_tests,
+};
@@ -2047,6 +2047,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
 	objdump_process.argv = objdump_argv;
 	objdump_process.out = -1;
 	objdump_process.err = -1;
+	objdump_process.no_stderr = 1;
 	if (start_command(&objdump_process)) {
 		pr_err("Failure starting to run %s\n", command);
 		err = -1;
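Why no_stderr avoids the hang: perf only drains objdump's stdout, so if objdump is also handed a stderr pipe and writes enough to fill it, the child blocks on stderr while the parent blocks reading stdout. A hedged sketch of the safe pattern, dropping the child's stderr so only one pipe ever needs draining:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
    int out[2];
    pid_t pid;
    char buf[4096];
    ssize_t n;

    if (pipe(out) < 0)
        exit(EXIT_FAILURE);
    pid = fork();
    if (pid < 0)
        exit(EXIT_FAILURE);
    if (pid == 0) {
        int devnull = open("/dev/null", O_WRONLY);

        dup2(out[1], STDOUT_FILENO);
        dup2(devnull, STDERR_FILENO); /* drop stderr: no second pipe to fill */
        close(out[0]);
        close(out[1]);
        execlp("objdump", "objdump", "--version", (char *)NULL);
        _exit(127);
    }
    close(out[1]);
    /* The parent only ever drains stdout, so it cannot deadlock on stderr. */
    while ((n = read(out[0], buf, sizeof(buf))) > 0)
        fwrite(buf, 1, (size_t)n, stdout);
    close(out[0]);
    waitpid(pid, NULL, 0);
    return 0;
}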
@@ -53,7 +53,7 @@ u64 get_leaf_frame_caller_aarch64(struct perf_sample *sample, struct thread *thr
 		sample->user_regs.cache_regs[PERF_REG_ARM64_SP] = 0;
 	}
 
-	ret = unwind__get_entries(add_entry, &entries, thread, sample, 2);
+	ret = unwind__get_entries(add_entry, &entries, thread, sample, 2, true);
 	sample->user_regs = old_regs;
 
 	if (ret || entries.length != 2)
@@ -2987,7 +2987,7 @@ static int thread__resolve_callchain_unwind(struct thread *thread,
 		return 0;
 
 	return unwind__get_entries(unwind_entry, cursor,
-				   thread, sample, max_stack);
+				   thread, sample, max_stack, false);
 }
 
 int thread__resolve_callchain(struct thread *thread,
@@ -2095,6 +2095,7 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
 	       bool needs_swap, union perf_event *error)
 {
 	union perf_event *event;
+	u16 event_size;
 
 	/*
 	 * Ensure we have enough space remaining to read
@@ -2107,15 +2108,23 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
 	if (needs_swap)
 		perf_event_header__bswap(&event->header);
 
-	if (head + event->header.size <= mmap_size)
+	event_size = event->header.size;
+	if (head + event_size <= mmap_size)
 		return event;
 
 	/* We're not fetching the event so swap back again */
 	if (needs_swap)
 		perf_event_header__bswap(&event->header);
 
-	pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:"
-		 " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size);
+	/* Check if the event fits into the next mmapped buf. */
+	if (event_size <= mmap_size - head % page_size) {
+		/* Remap buf and fetch again. */
+		return NULL;
+	}
+
+	/* Invalid input. Event size should never exceed mmap_size. */
+	pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
+		 " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);
 
 	return error;
 }
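A pure-logic sketch of prefetch_event()'s new three-way decision, using hypothetical names; the real code returns the event pointer, NULL (remap and retry), or the error marker:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum fetch { FETCH_OK, FETCH_REMAP, FETCH_ERROR };

/* head: event offset within the current window, mmap_size: bytes mapped,
 * page_size: remap granularity (all hypothetical stand-ins). */
static enum fetch classify(uint64_t head, uint16_t event_size,
                           size_t mmap_size, size_t page_size)
{
    if (head + event_size <= mmap_size)
        return FETCH_OK;    /* event fully inside the current map */
    if (event_size <= mmap_size - head % page_size)
        return FETCH_REMAP; /* fits once buf is remapped at head's page */
    return FETCH_ERROR;     /* bigger than any window: fuzzed input */
}

int main(void)
{
    printf("%d\n", classify(0, 64, 8192, 4096));    /* 0: fits */
    printf("%d\n", classify(8184, 64, 8192, 4096)); /* 1: remap, then fits */
    printf("%d\n", classify(0, 16384, 8192, 4096)); /* 2: invalid input */
    return 0;
}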
@@ -1,12 +1,14 @@
-from os import getenv
+from os import getenv, path
 from subprocess import Popen, PIPE
 from re import sub
 
 cc = getenv("CC")
 cc_is_clang = b"clang version" in Popen([cc.split()[0], "-v"], stderr=PIPE).stderr.readline()
+src_feature_tests = getenv('srctree') + '/tools/build/feature'
 
 def clang_has_option(option):
-    return [o for o in Popen([cc, option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ]
+    cc_output = Popen([cc, option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines()
+    return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o))] == [ ]
 
 if cc_is_clang:
     from distutils.sysconfig import get_config_vars
@@ -23,6 +25,8 @@ if cc_is_clang:
         vars[var] = sub("-fstack-protector-strong", "", vars[var])
         if not clang_has_option("-fno-semantic-interposition"):
             vars[var] = sub("-fno-semantic-interposition", "", vars[var])
+        if not clang_has_option("-ffat-lto-objects"):
+            vars[var] = sub("-ffat-lto-objects", "", vars[var])
 
 from distutils.core import setup, Extension
@@ -200,7 +200,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
 	bool isactivation;
 
 	if (!dwfl_frame_pc(state, &pc, NULL)) {
-		pr_err("%s", dwfl_errmsg(-1));
+		if (!ui->best_effort)
+			pr_err("%s", dwfl_errmsg(-1));
 		return DWARF_CB_ABORT;
 	}
 
@@ -208,7 +209,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
 	report_module(pc, ui);
 
 	if (!dwfl_frame_pc(state, &pc, &isactivation)) {
-		pr_err("%s", dwfl_errmsg(-1));
+		if (!ui->best_effort)
+			pr_err("%s", dwfl_errmsg(-1));
 		return DWARF_CB_ABORT;
 	}
 
@@ -222,7 +224,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 			struct thread *thread,
 			struct perf_sample *data,
-			int max_stack)
+			int max_stack,
+			bool best_effort)
 {
 	struct unwind_info *ui, ui_buf = {
 		.sample = data,
@@ -231,6 +234,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 		.cb = cb,
 		.arg = arg,
 		.max_stack = max_stack,
+		.best_effort = best_effort
 	};
 	Dwarf_Word ip;
 	int err = -EINVAL, i;
@@ -20,6 +20,7 @@ struct unwind_info {
 	void			*arg;
 	int			max_stack;
 	int			idx;
+	bool			best_effort;
 	struct unwind_entry	entries[];
 };
 
@@ -96,6 +96,7 @@ struct unwind_info {
 	struct perf_sample	*sample;
 	struct machine		*machine;
 	struct thread		*thread;
+	bool			 best_effort;
 };
 
 #define dw_read(ptr, type, end) ({	\
@@ -553,7 +554,8 @@ static int access_reg(unw_addr_space_t __maybe_unused as,
 
 	ret = perf_reg_value(&val, &ui->sample->user_regs, id);
 	if (ret) {
-		pr_err("unwind: can't read reg %d\n", regnum);
+		if (!ui->best_effort)
+			pr_err("unwind: can't read reg %d\n", regnum);
 		return ret;
 	}
 
@@ -666,7 +668,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
 		return -1;
 
 	ret = unw_init_remote(&c, addr_space, ui);
-	if (ret)
+	if (ret && !ui->best_effort)
 		display_error(ret);
 
 	while (!ret && (unw_step(&c) > 0) && i < max_stack) {
@@ -704,12 +706,14 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
 
 static int _unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 			struct thread *thread,
-			struct perf_sample *data, int max_stack)
+			struct perf_sample *data, int max_stack,
+			bool best_effort)
 {
 	struct unwind_info ui = {
 		.sample       = data,
 		.thread       = thread,
 		.machine      = thread->maps->machine,
+		.best_effort  = best_effort
 	};
 
 	if (!data->user_regs.regs)
@@ -80,9 +80,11 @@ void unwind__finish_access(struct maps *maps)
 
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 			struct thread *thread,
-			struct perf_sample *data, int max_stack)
+			struct perf_sample *data, int max_stack,
+			bool best_effort)
 {
 	if (thread->maps->unwind_libunwind_ops)
-		return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
+		return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data,
+								       max_stack, best_effort);
 	return 0;
 }
@@ -23,13 +23,19 @@ struct unwind_libunwind_ops {
 	void (*finish_access)(struct maps *maps);
 	int (*get_entries)(unwind_entry_cb_t cb, void *arg,
 			   struct thread *thread,
-			   struct perf_sample *data, int max_stack);
+			   struct perf_sample *data, int max_stack, bool best_effort);
 };
 
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
+/*
+ * When best_effort is set, don't report errors and fail silently. This could
+ * be expanded in the future to be more permissive about things other than
+ * error messages.
+ */
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 			struct thread *thread,
-			struct perf_sample *data, int max_stack);
+			struct perf_sample *data, int max_stack,
+			bool best_effort);
 /* libunwind specific */
 #ifdef HAVE_LIBUNWIND_SUPPORT
 #ifndef LIBUNWIND__ARCH_REG_ID
@@ -65,7 +71,8 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
 		    void *arg __maybe_unused,
 		    struct thread *thread __maybe_unused,
 		    struct perf_sample *data __maybe_unused,
-		    int max_stack __maybe_unused)
+		    int max_stack __maybe_unused,
+		    bool best_effort __maybe_unused)
 {
 	return 0;
 }
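A minimal sketch of the best-effort convention the comment above describes: callers that unwind opportunistically (like the arm64 leaf-frame probe passing true) suppress error output, while full unwinds keep it. The helper names here are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper mirroring the callbacks' shape: report failures only
 * when the caller did not ask for best-effort unwinding. */
static int read_frame(int frame, long *pc, bool best_effort)
{
    if (frame > 1) { /* pretend deeper frames are unreadable */
        if (!best_effort)
            fprintf(stderr, "unwind: cannot read frame %d\n", frame);
        return -1;
    }
    *pc = 0x1000 + frame * 4; /* made-up program counter */
    return 0;
}

int main(void)
{
    long pc;
    int i;

    /* Best-effort caller: takes the two frames it can get, stays silent. */
    for (i = 0; read_frame(i, &pc, true) == 0; i++)
        printf("frame %d: %#lx\n", i, pc);
    return 0;
}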