Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
trace: Add trace_ipi_send_cpu()
Because copying cpumasks around when targeting a single CPU is a bit daft...

Tested-and-reviewed-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20230322103004.GA571242%40hirez.programming.kicks-ass.net
This commit is contained in:
parent 68f4ff04db
commit 68e2d17c9e
include/linux/smp.h
@@ -130,9 +130,9 @@ extern void arch_smp_send_reschedule(int cpu);
  * scheduler_ipi() is inline so can't be passed as callback reason, but the
  * callsite IP should be sufficient for root-causing IPIs sent from here.
  */
-#define smp_send_reschedule(cpu) ({ \
-	trace_ipi_send_cpumask(cpumask_of(cpu), _RET_IP_, NULL); \
-	arch_smp_send_reschedule(cpu); \
+#define smp_send_reschedule(cpu) ({ \
+	trace_ipi_send_cpu(cpu, _RET_IP_, NULL); \
+	arch_smp_send_reschedule(cpu); \
 })
 
 /*
include/trace/events/ipi.h
@@ -35,6 +35,28 @@ TRACE_EVENT(ipi_raise,
 	TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
 );
 
+TRACE_EVENT(ipi_send_cpu,
+
+	TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback),
+
+	TP_ARGS(cpu, callsite, callback),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(void *, callsite)
+		__field(void *, callback)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->callsite = (void *)callsite;
+		__entry->callback = callback;
+	),
+
+	TP_printk("cpu=%u callsite=%pS callback=%pS",
+		  __entry->cpu, __entry->callsite, __entry->callback)
+);
+
 TRACE_EVENT(ipi_send_cpumask,
 
 	TP_PROTO(const struct cpumask *cpumask, unsigned long callsite, void *callback),
kernel/irq_work.c
@@ -78,10 +78,8 @@ void __weak arch_irq_work_raise(void)
 
 static __always_inline void irq_work_raise(struct irq_work *work)
 {
-	if (trace_ipi_send_cpumask_enabled() && arch_irq_work_has_interrupt())
-		trace_ipi_send_cpumask(cpumask_of(smp_processor_id()),
-				       _RET_IP_,
-				       work->func);
+	if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
+		trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);
 
 	arch_irq_work_raise();
 }
kernel/sched/core.c
@@ -96,6 +96,7 @@
 #include "../../io_uring/io-wq.h"
 #include "../smpboot.h"
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
 EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
 
 /*
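Since the new tracepoint is exported with EXPORT_TRACEPOINT_SYMBOL_GPL() above, a GPL module can attach a probe to it. The following is an illustrative sketch only, not part of this commit: it assumes the register_trace_ipi_send_cpu()/unregister_trace_ipi_send_cpu() helpers that TRACE_EVENT() generates, and the probe/module names are made up.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch, not part of this commit. */
#include <linux/module.h>
#include <trace/events/ipi.h>

/* Tracepoint probes receive a void *data cookie followed by the TP_PROTO arguments. */
static void ipi_send_cpu_probe(void *data, const unsigned int cpu,
			       unsigned long callsite, void *callback)
{
	pr_info("IPI -> CPU%u from %pS (callback %pS)\n",
		cpu, (void *)callsite, callback);
}

static int __init ipi_probe_init(void)
{
	return register_trace_ipi_send_cpu(ipi_send_cpu_probe, NULL);
}

static void __exit ipi_probe_exit(void)
{
	unregister_trace_ipi_send_cpu(ipi_send_cpu_probe, NULL);
	tracepoint_synchronize_unregister();
}

module_init(ipi_probe_init);
module_exit(ipi_probe_exit);
MODULE_LICENSE("GPL");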
kernel/smp.c
@@ -107,7 +107,7 @@ static __always_inline void
 send_call_function_single_ipi(int cpu, smp_call_func_t func)
 {
 	if (call_function_single_prep_ipi(cpu)) {
-		trace_ipi_send_cpumask(cpumask_of(cpu), _RET_IP_, func);
+		trace_ipi_send_cpu(cpu, _RET_IP_, func);
 		arch_send_call_function_single_ipi(cpu);
 	}
 }
@@ -346,7 +346,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
 	 * even if we haven't sent the smp_call IPI yet (e.g. the stopper
 	 * executes migration_cpu_stop() on the remote CPU).
 	 */
-	if (trace_ipi_send_cpumask_enabled()) {
+	if (trace_ipi_send_cpu_enabled()) {
 		call_single_data_t *csd;
 		smp_call_func_t func;
 
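Note (editor's sketch, not from the patch): trace_ipi_send_cpu_enabled() in the hunk above is the static-key-backed check generated for the event, so wrapping the csd/func lookup in it costs essentially nothing while the event is disabled. A minimal sketch of the same pattern; the helper name trace_single_ipi() is hypothetical.

/* Hypothetical helper, shown only to illustrate the enabled-check pattern. */
static inline void trace_single_ipi(int cpu, smp_call_func_t func)
{
	/* Static-key check: near-zero cost when the tracepoint is off. */
	if (trace_ipi_send_cpu_enabled())
		trace_ipi_send_cpu(cpu, _RET_IP_, func);
}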