Tracing updates for 6.13:
Merge tag 'trace-v6.13-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull more tracing updates from Steven Rostedt:

 - Add trace flag for NEED_RESCHED_LAZY

   Now that NEED_RESCHED_LAZY is upstream, add it to the status bits of
   the common_flags. A trace will now show when the NEED_RESCHED_LAZY
   flag is set, which is useful for debugging latency issues in the
   kernel.

 - Remove leftover "__idx" variable, left behind when SRCU was removed
   from the tracepoint code

 - Add rcu_tasks_trace guard

   To add a guard() around the tracepoint code, an rcu_tasks_trace
   guard needs to be created first.

 - Remove the __DO_TRACE() macro and call __DO_TRACE_CALL() directly

   The __DO_TRACE() macro has conditional locking depending on what was
   passed into its parameters. As the guts of the macro have been moved
   to __DO_TRACE_CALL() to handle the static call logic, there's no
   reason to keep __DO_TRACE() around. It is better to do the locking
   in place, without the conditionals, and call __DO_TRACE_CALL() from
   those locations. The "cond" passed in can also be moved out of the
   macro. This simplifies the code.

 - Remove the "cond" from the system call tracepoint macros

   The "cond" variable was added to allow some tracepoints to check a
   condition within the static_branch (jump/nop) logic. The system
   calls do not need this, and removing it simplifies the code.

 - Replace scoped_guard() with just guard() in the tracepoint logic

   guard() works just as well as scoped_guard() here, and
   scoped_guard() causes some issues.
* tag 'trace-v6.13-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  tracing: Use guard() rather than scoped_guard()
  tracing: Remove cond argument from __DECLARE_TRACE_SYSCALL
  tracing: Remove conditional locking from __DO_TRACE()
  rcupdate_trace: Define rcu_tasks_trace lock guard
  tracing: Remove __idx variable from __DO_TRACE
  tracing: Move it_func[0] comment to the relevant context
  tracing: Record task flag NEED_RESCHED_LAZY.
commit 7af08b57bc
@@ -1033,9 +1033,13 @@ explains which is which.
   irqs-off: 'd' interrupts are disabled. '.' otherwise.
 
   need-resched:
+	- 'B' all, TIF_NEED_RESCHED, PREEMPT_NEED_RESCHED and TIF_RESCHED_LAZY is set,
 	- 'N' both TIF_NEED_RESCHED and PREEMPT_NEED_RESCHED is set,
 	- 'n' only TIF_NEED_RESCHED is set,
 	- 'p' only PREEMPT_NEED_RESCHED is set,
+	- 'L' both PREEMPT_NEED_RESCHED and TIF_RESCHED_LAZY is set,
+	- 'b' both TIF_NEED_RESCHED and TIF_RESCHED_LAZY is set,
+	- 'l' only TIF_RESCHED_LAZY is set
 	- '.' otherwise.
 
   hardirq/softirq:
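In the trace output, these letters occupy the need-resched slot of the per-event flags field, directly after the irqs-off letter. An illustrative, hand-written line (not captured from a real trace):

	<idle>-0       [000] dN.2.   123.456789: sched_waking: comm=bash ...

Here 'd' is the irqs-off letter and 'N' the need-resched letter; on a kernel with lazy preemption the same position can read 'B', 'L', 'b' or 'l', per the list above.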
@@ -10,6 +10,7 @@
 
 #include <linux/sched.h>
 #include <linux/rcupdate.h>
+#include <linux/cleanup.h>
 
 extern struct lockdep_map rcu_trace_lock_map;
 
@@ -98,4 +99,8 @@ static inline void rcu_read_lock_trace(void) { BUG(); }
 static inline void rcu_read_unlock_trace(void) { BUG(); }
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 
+DEFINE_LOCK_GUARD_0(rcu_tasks_trace,
+		    rcu_read_lock_trace(),
+		    rcu_read_unlock_trace())
+
 #endif /* __LINUX_RCUPDATE_TRACE_H */
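DEFINE_LOCK_GUARD_0() generates a no-argument lock class, so callers can open a Tasks Trace RCU read-side section as a scope-based guard. A minimal usage sketch (the demo function is illustrative, not part of this commit):

	#include <linux/rcupdate_trace.h>

	static void demo_tasks_trace_section(void)
	{
		guard(rcu_tasks_trace)();	/* rcu_read_lock_trace() runs here */

		/* Read-side critical section; probes running here may fault. */
	}	/* rcu_read_unlock_trace() runs automatically at scope exit */

This is exactly the form the reworked tracepoint macros below rely on.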
@@ -184,6 +184,7 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
 
 enum trace_flag_type {
 	TRACE_FLAG_IRQS_OFF		= 0x01,
+	TRACE_FLAG_NEED_RESCHED_LAZY	= 0x02,
 	TRACE_FLAG_NEED_RESCHED		= 0x04,
 	TRACE_FLAG_HARDIRQ		= 0x08,
 	TRACE_FLAG_SOFTIRQ		= 0x10,
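The new bit slots into the reschedule mask that the output code at the bottom of this page switches over. A sketch derived from the enum (RESCHED_MASK is a hypothetical name used for illustration; TRACE_FLAG_PREEMPT_RESCHED is 0x20 further down this enum, outside the hunk shown):

	/* The three reschedule-related bits combine freely: */
	#define RESCHED_MASK	(TRACE_FLAG_NEED_RESCHED |	/* 0x04 */ \
				 TRACE_FLAG_NEED_RESCHED_LAZY |	/* 0x02 */ \
				 TRACE_FLAG_PREEMPT_RESCHED)	/* 0x20 */

	/*
	 * All eight values of (entry->flags & RESCHED_MASK) map to one of
	 * 'B', 'N', 'L', 'b', 'n', 'p', 'l' or '.' in trace_print_lat_fmt().
	 */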
@@ -209,36 +209,6 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 #define __DO_TRACE_CALL(name, args)	__traceiter_##name(NULL, args)
 #endif /* CONFIG_HAVE_STATIC_CALL */
 
-/*
- * it_func[0] is never NULL because there is at least one element in the array
- * when the array itself is non NULL.
- *
- * With @syscall=0, the tracepoint callback array dereference is
- * protected by disabling preemption.
- * With @syscall=1, the tracepoint callback array dereference is
- * protected by Tasks Trace RCU, which allows probes to handle page
- * faults.
- */
-#define __DO_TRACE(name, args, cond, syscall)				\
-	do {								\
-		int __maybe_unused __idx = 0;				\
-									\
-		if (!(cond))						\
-			return;						\
-									\
-		if (syscall)						\
-			rcu_read_lock_trace();				\
-		else							\
-			preempt_disable_notrace();			\
-									\
-		__DO_TRACE_CALL(name, TP_ARGS(args));			\
-									\
-		if (syscall)						\
-			rcu_read_unlock_trace();			\
-		else							\
-			preempt_enable_notrace();			\
-	} while (0)
-
 /*
  * Declare an exported function that Rust code can call to trigger this
  * tracepoint. This function does not include the static branch; that is done
@@ -262,7 +232,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
  * site if it is not watching, as it will need to be active when the
  * tracepoint is enabled.
  */
-#define __DECLARE_TRACE_COMMON(name, proto, args, cond, data_proto)	\
+#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto)		\
 	extern int __traceiter_##name(data_proto);			\
 	DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name);	\
 	extern struct tracepoint __tracepoint_##name;			\
@@ -297,41 +267,43 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 	}
 
 #define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
-	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), cond, PARAMS(data_proto)) \
+	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
 	static inline void __rust_do_trace_##name(proto)		\
 	{								\
-		__DO_TRACE(name,					\
-			TP_ARGS(args),					\
-			TP_CONDITION(cond), 0);				\
+		if (cond) {						\
+			guard(preempt_notrace)();			\
+			__DO_TRACE_CALL(name, TP_ARGS(args));		\
+		}							\
 	}								\
 	static inline void trace_##name(proto)				\
 	{								\
-		if (static_branch_unlikely(&__tracepoint_##name.key))	\
-			__DO_TRACE(name,				\
-				TP_ARGS(args),				\
-				TP_CONDITION(cond), 0);			\
+		if (static_branch_unlikely(&__tracepoint_##name.key)) { \
+			if (cond) {					\
+				guard(preempt_notrace)();		\
+				__DO_TRACE_CALL(name, TP_ARGS(args));	\
+			}						\
+		}							\
 		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {		\
 			WARN_ONCE(!rcu_is_watching(),			\
 				  "RCU not watching for tracepoint");	\
 		}							\
 	}
 
-#define __DECLARE_TRACE_SYSCALL(name, proto, args, cond, data_proto)	\
-	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), cond, PARAMS(data_proto)) \
+#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto)		\
+	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
 	static inline void __rust_do_trace_##name(proto)		\
 	{								\
-		__DO_TRACE(name,					\
-			TP_ARGS(args),					\
-			TP_CONDITION(cond), 1);				\
+		guard(rcu_tasks_trace)();				\
+		__DO_TRACE_CALL(name, TP_ARGS(args));			\
 	}								\
 	static inline void trace_##name(proto)				\
 	{								\
 		might_fault();						\
-		if (static_branch_unlikely(&__tracepoint_##name.key))	\
-			__DO_TRACE(name,				\
-				TP_ARGS(args),				\
-				TP_CONDITION(cond), 1);			\
-		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {		\
+		if (static_branch_unlikely(&__tracepoint_##name.key)) { \
+			guard(rcu_tasks_trace)();			\
+			__DO_TRACE_CALL(name, TP_ARGS(args));		\
+		}							\
+		if (IS_ENABLED(CONFIG_LOCKDEP)) {			\
 			WARN_ONCE(!rcu_is_watching(),			\
 				  "RCU not watching for tracepoint");	\
 		}							\
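For context on the final commit in the series, a minimal sketch of the two cleanup.h idioms involved (the demo names are illustrative; the real definitions live in include/linux/cleanup.h):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);

	static void demo_guard(void)
	{
		guard(mutex)(&demo_lock);	/* unlock is tied to this scope */

		/* ... critical section ... */
	}	/* mutex_unlock() runs automatically here */

	static void demo_scoped_guard(void)
	{
		scoped_guard(mutex, &demo_lock) {
			/* lock held only for this block (an implicit loop scope) */
		}
	}

guard() ties the unlock to the enclosing scope, which is all the tracepoint fast path needs; the commit message above only notes that scoped_guard() "causes some issues" here.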
@@ -341,6 +313,9 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
  * We have no guarantee that gcc and the linker won't up-align the tracepoint
  * structures, so we create an array of pointers that will be used for iteration
  * on the tracepoints.
+ *
+ * it_func[0] is never NULL because there is at least one element in the array
+ * when the array itself is non NULL.
  */
 #define __DEFINE_TRACE_EXT(_name, _ext, proto, args)			\
 	static const char __tpstrtab_##_name[]				\
@@ -412,7 +387,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 
 
 #else /* !TRACEPOINTS_ENABLED */
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
+#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto)		\
 	static inline void trace_##name(proto)				\
 	{ }								\
 	static inline int						\
@@ -436,7 +411,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 		return false;						\
 	}
 
-#define __DECLARE_TRACE_SYSCALL	__DECLARE_TRACE
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
+	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto))
+
+#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto)		\
+	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto))
 
 #define DEFINE_TRACE_FN(name, reg, unreg, proto, args)
 #define DEFINE_TRACE_SYSCALL(name, reg, unreg, proto, args)
@@ -502,7 +481,6 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 
 #define DECLARE_TRACE_SYSCALL(name, proto, args)			\
 	__DECLARE_TRACE_SYSCALL(name, PARAMS(proto), PARAMS(args),	\
-				cpu_online(raw_smp_processor_id()),	\
 				PARAMS(void *__data, proto))
 
 #define TRACE_EVENT_FLAGS(event, flag)
@@ -2552,6 +2552,8 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
 	if (test_preempt_need_resched())
 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
+	if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY))
+		trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY;
 	return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
 		(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
 }
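The return statement above packs the context into a single word; the layout, read directly off the expression:

	bits 16-31: trace_flags (the TRACE_FLAG_* bits, now including NEED_RESCHED_LAZY)
	bits  8-15: unused (trace_flags is shifted past them)
	bits  4-7 : migration-disable depth, clamped to 0xf
	bits  0-3 : preemption count (low byte of pc), clamped to 0xf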
@@ -462,17 +462,29 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 		bh_off ? 'b' :
 		'.';
 
-	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
+	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY |
 				TRACE_FLAG_PREEMPT_RESCHED)) {
+	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
+		need_resched = 'B';
+		break;
 	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
 		need_resched = 'N';
 		break;
+	case TRACE_FLAG_NEED_RESCHED_LAZY | TRACE_FLAG_PREEMPT_RESCHED:
+		need_resched = 'L';
+		break;
+	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_NEED_RESCHED_LAZY:
+		need_resched = 'b';
+		break;
 	case TRACE_FLAG_NEED_RESCHED:
 		need_resched = 'n';
 		break;
 	case TRACE_FLAG_PREEMPT_RESCHED:
 		need_resched = 'p';
 		break;
+	case TRACE_FLAG_NEED_RESCHED_LAZY:
+		need_resched = 'l';
+		break;
 	default:
 		need_resched = '.';
 		break;