mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-12-29 17:25:38 +00:00
perf trace: Ignore thread hashing in summary
Commit 91e467bc56
("perf machine: Use hashtable for machine
threads") made the iteration of thread tids unordered. The perf trace
--summary output sorts and prints each hash bucket, rather than all
threads globally. Change this behavior by turning all threads into a
list, sorting the list by number of trace events and then by tid, and
finally printing the list. This also allows the rbtree in threads not
to be accessed outside of machine.
Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Yang Jihong <yangjihong1@huawei.com>
Cc: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240301053646.1449657-3-irogers@google.com
This commit is contained in:
parent
2f1e20feb9
commit
f178ffdf7e
@ -74,6 +74,7 @@
|
||||
#include <linux/err.h>
|
||||
#include <linux/filter.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/list_sort.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/stringify.h>
|
||||
#include <linux/time64.h>
|
||||
@ -4312,34 +4313,38 @@ static unsigned long thread__nr_events(struct thread_trace *ttrace)
|
||||
return ttrace ? ttrace->nr_events : 0;
|
||||
}
|
||||
|
||||
DEFINE_RESORT_RB(threads,
|
||||
(thread__nr_events(thread__priv(a->thread)) <
|
||||
thread__nr_events(thread__priv(b->thread))),
|
||||
struct thread *thread;
|
||||
)
|
||||
static int trace_nr_events_cmp(void *priv __maybe_unused,
|
||||
const struct list_head *la,
|
||||
const struct list_head *lb)
|
||||
{
|
||||
entry->thread = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
|
||||
struct thread_list *a = list_entry(la, struct thread_list, list);
|
||||
struct thread_list *b = list_entry(lb, struct thread_list, list);
|
||||
unsigned long a_nr_events = thread__nr_events(thread__priv(a->thread));
|
||||
unsigned long b_nr_events = thread__nr_events(thread__priv(b->thread));
|
||||
|
||||
if (a_nr_events != b_nr_events)
|
||||
return a_nr_events < b_nr_events ? -1 : 1;
|
||||
|
||||
/* Identical number of threads, place smaller tids first. */
|
||||
return thread__tid(a->thread) < thread__tid(b->thread)
|
||||
? -1
|
||||
: (thread__tid(a->thread) > thread__tid(b->thread) ? 1 : 0);
|
||||
}
|
||||
|
||||
static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
|
||||
{
|
||||
size_t printed = trace__fprintf_threads_header(fp);
|
||||
struct rb_node *nd;
|
||||
int i;
|
||||
LIST_HEAD(threads);
|
||||
|
||||
for (i = 0; i < THREADS__TABLE_SIZE; i++) {
|
||||
DECLARE_RESORT_RB_MACHINE_THREADS(threads, trace->host, i);
|
||||
if (machine__thread_list(trace->host, &threads) == 0) {
|
||||
struct thread_list *pos;
|
||||
|
||||
if (threads == NULL) {
|
||||
fprintf(fp, "%s", "Error sorting output by nr_events!\n");
|
||||
return 0;
|
||||
}
|
||||
list_sort(NULL, &threads, trace_nr_events_cmp);
|
||||
|
||||
resort_rb__for_each_entry(nd, threads)
|
||||
printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
|
||||
|
||||
resort_rb__delete(threads);
|
||||
list_for_each_entry(pos, &threads, list)
|
||||
printed += trace__fprintf_thread(fp, pos->thread, trace);
|
||||
}
|
||||
thread_list__delete(&threads);
|
||||
return printed;
|
||||
}
|
||||
|
||||
|
@ -143,9 +143,4 @@ struct __name##_sorted *__name = __name##_sorted__new
|
||||
DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root, \
|
||||
__ilist->rblist.nr_entries)
|
||||
|
||||
/* For 'struct machine->threads' */
|
||||
#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \
|
||||
DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries.rb_root, \
|
||||
__machine->threads[hash_bucket].nr)
|
||||
|
||||
#endif /* _PERF_RESORT_RB_H_ */
|
||||
|
Loading…
Reference in New Issue
Block a user