Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2025-01-17 05:45:20 +00:00)
Merge branch 'core-stacktrace-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull stack trace updates from Ingo Molnar:
 "So Thomas looked at the stacktrace code recently and noticed a few
  weirdnesses, and we all know how such stories of crummy kernel code
  meeting German engineering perfection end: a 45-patch series to clean
  it all up! :-)

  Here's the changes in Thomas's words:

   'Struct stack_trace is a sinkhole for input and output parameters
    which is largely pointless for most usage sites. In fact if embedded
    into other data structures it creates indirections and extra storage
    overhead for no benefit.

    Looking at all usage sites makes it clear that they just require an
    interface which is based on a storage array. That array is either on
    stack, global or embedded into some other data structure.

    Some of the stack depot usage sites are outright wrong, but
    fortunately the wrongness just causes more stack being used for
    nothing and does not have functional impact.

    Another oddity is the inconsistent termination of the stack trace
    with ULONG_MAX. It's pointless as the number of entries is what
    determines the length of the stored trace. In fact quite some call
    sites remove the ULONG_MAX marker afterwards with or without nasty
    comments about it. Not all architectures do that and those which do,
    do it inconsistently either conditional on nr_entries == 0 or
    unconditionally.

    The following series cleans that up by:

      1) Removing the ULONG_MAX termination in the architecture code

      2) Removing the ULONG_MAX fixups at the call sites

      3) Providing plain storage array based interfaces for stacktrace
         and stackdepot.

      4) Cleaning up the mess at the callsites including some related
         cleanups.

      5) Removing the struct stack_trace based interfaces

    This is not changing the struct stack_trace interfaces at the
    architecture level, but it removes the exposure to the generic
    code'"

* 'core-stacktrace-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (45 commits)
  x86/stacktrace: Use common infrastructure
  stacktrace: Provide common infrastructure
  lib/stackdepot: Remove obsolete functions
  stacktrace: Remove obsolete functions
  livepatch: Simplify stack trace retrieval
  tracing: Remove the last struct stack_trace usage
  tracing: Simplify stack trace retrieval
  tracing: Make ftrace_trace_userstack() static and conditional
  tracing: Use percpu stack trace buffer more intelligently
  tracing: Simplify stacktrace retrieval in histograms
  lockdep: Simplify stack trace handling
  lockdep: Remove save argument from check_prev_add()
  lockdep: Remove unused trace argument from print_circular_bug()
  drm: Simplify stacktrace handling
  dm persistent data: Simplify stack trace handling
  dm bufio: Simplify stack trace retrieval
  btrfs: ref-verify: Simplify stack trace retrieval
  dma/debug: Simplify stracktrace retrieval
  fault-inject: Simplify stacktrace retrieval
  mm/page_owner: Simplify stack trace handling
  ...
This commit is contained in: commit 2c6a392cdd
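The bulk of the diff below repeats one call-site conversion: drop the struct stack_trace in/out parameter and the trailing ULONG_MAX marker, and use the plain storage-array helpers instead. A rough before/after sketch of that pattern follows (the 16-entry buffer and the depot_stack_handle_t variable are illustrative, not taken from any single hunk):

	/* Old pattern: struct stack_trace as in/out parameter, ULONG_MAX marker trimmed by hand */
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 1,
	};
	depot_stack_handle_t handle;

	save_stack_trace(&trace);
	if (trace.nr_entries && trace.entries[trace.nr_entries - 1] == ULONG_MAX)
		trace.nr_entries--;
	handle = depot_save_stack(&trace, GFP_NOWAIT);

	/* New pattern: plain array plus an entry count returned by the helper */
	unsigned long entries[16];
	unsigned int nr;
	depot_stack_handle_t handle;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	handle = stack_depot_save(entries, nr, GFP_NOWAIT);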
@@ -115,8 +115,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
 * running on another CPU? For now, ignore it as we
 * can't guarantee we won't explode.
 */
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 return;
 #else
 frame.fp = thread_saved_fp(tsk);
@@ -134,8 +132,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
 }

 walk_stackframe(&frame, save_trace, &data);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }

 void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
@@ -153,8 +149,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 frame.pc = regs->ARM_pc;

 walk_stackframe(&frame, save_trace, &data);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }

 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
@@ -140,8 +140,6 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 #endif

 walk_stackframe(current, &frame, save_trace, &data);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_regs);

@@ -172,8 +170,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
 #endif

 walk_stackframe(tsk, &frame, save_trace, &data);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;

 put_task_stack(tsk);
 }
@@ -29,22 +29,17 @@ static void dump_trace(struct task_struct *task, struct stack_trace *trace)
 }
 }

-
 /*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
 void save_stack_trace(struct stack_trace *trace)
 {
 dump_trace(current, trace);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);

 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
 dump_trace(tsk, trace);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
@@ -169,8 +169,6 @@ static bool save_trace(unsigned long pc, void *arg)
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
 walk_stackframe(tsk, NULL, save_trace, trace);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

@@ -45,8 +45,6 @@ void save_stack_trace(struct stack_trace *trace)

 sp = current_stack_pointer();
 dump_trace(save_address, trace, NULL, sp);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);

@@ -58,8 +56,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 if (tsk == current)
 sp = current_stack_pointer();
 dump_trace(save_address_nosched, trace, tsk, sp);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

@@ -69,7 +65,5 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)

 sp = kernel_stack_pointer(regs);
 dump_trace(save_address, trace, NULL, sp);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_regs);
@@ -49,8 +49,6 @@ void save_stack_trace(struct stack_trace *trace)
 unsigned long *sp = (unsigned long *)current_stack_pointer;

 unwind_stack(current, NULL, sp, &save_stack_ops, trace);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);

@@ -84,7 +82,5 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 unsigned long *sp = (unsigned long *)tsk->thread.sp;

 unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
@@ -63,8 +63,6 @@ static const struct stacktrace_ops dump_ops = {
 static void __save_stack_trace(struct task_struct *tsk, struct stack_trace *trace)
 {
 dump_trace(tsk, &dump_ops, trace);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }

 void save_stack_trace(struct stack_trace *trace)
@@ -120,8 +120,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 }

 walk_stackframe(&frame, save_trace, &data);
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }

 void save_stack_trace(struct stack_trace *trace)
@@ -74,6 +74,7 @@ config X86
 select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
 select ARCH_MIGHT_HAVE_PC_PARPORT
 select ARCH_MIGHT_HAVE_PC_SERIO
+select ARCH_STACKWALK
 select ARCH_SUPPORTS_ACPI
 select ARCH_SUPPORTS_ATOMIC_RMW
 select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
@@ -12,78 +12,31 @@
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>

-static int save_stack_address(struct stack_trace *trace, unsigned long addr,
-bool nosched)
-{
-if (nosched && in_sched_functions(addr))
-return 0;
-
-if (trace->skip > 0) {
-trace->skip--;
-return 0;
-}
-
-if (trace->nr_entries >= trace->max_entries)
-return -1;
-
-trace->entries[trace->nr_entries++] = addr;
-return 0;
-}
-
-static void noinline __save_stack_trace(struct stack_trace *trace,
-struct task_struct *task, struct pt_regs *regs,
-bool nosched)
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+struct task_struct *task, struct pt_regs *regs)
 {
 struct unwind_state state;
 unsigned long addr;

-if (regs)
-save_stack_address(trace, regs->ip, nosched);
+if (regs && !consume_entry(cookie, regs->ip, false))
+return;

 for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
 unwind_next_frame(&state)) {
 addr = unwind_get_return_address(&state);
-if (!addr || save_stack_address(trace, addr, nosched))
+if (!addr || !consume_entry(cookie, addr, false))
 break;
 }
-
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
 }

 /*
-* Save stack-backtrace addresses into a stack_trace buffer.
+* This function returns an error if it detects any unreliable features of the
+* stack. Otherwise it guarantees that the stack trace is reliable.
+*
+* If the task is not 'current', the caller *must* ensure the task is inactive.
 */
-void save_stack_trace(struct stack_trace *trace)
-{
-trace->skip++;
-__save_stack_trace(trace, current, NULL, false);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-
-void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
-__save_stack_trace(trace, current, regs, false);
-}
-
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
-if (!try_get_task_stack(tsk))
-return;
-
-if (tsk == current)
-trace->skip++;
-__save_stack_trace(trace, tsk, NULL, true);
-
-put_task_stack(tsk);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-
-#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
-
-static int __always_inline
-__save_stack_trace_reliable(struct stack_trace *trace,
-struct task_struct *task)
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+void *cookie, struct task_struct *task)
 {
 struct unwind_state state;
 struct pt_regs *regs;
@@ -97,7 +50,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
 if (regs) {
 /* Success path for user tasks */
 if (user_mode(regs))
-goto success;
+return 0;

 /*
 * Kernel mode registers on the stack indicate an
@@ -120,7 +73,7 @@ __save_stack_trace_reliable(struct stack_trace *trace,
 if (!addr)
 return -EINVAL;

-if (save_stack_address(trace, addr, false))
+if (!consume_entry(cookie, addr, false))
 return -EINVAL;
 }

@@ -132,39 +85,9 @@ __save_stack_trace_reliable(struct stack_trace *trace,
 if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
 return -EINVAL;

-success:
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
-
 return 0;
 }

-/*
-* This function returns an error if it detects any unreliable features of the
-* stack. Otherwise it guarantees that the stack trace is reliable.
-*
-* If the task is not 'current', the caller *must* ensure the task is inactive.
-*/
-int save_stack_trace_tsk_reliable(struct task_struct *tsk,
-struct stack_trace *trace)
-{
-int ret;
-
-/*
-* If the task doesn't have a stack (e.g., a zombie), the stack is
-* "reliably" empty.
-*/
-if (!try_get_task_stack(tsk))
-return 0;
-
-ret = __save_stack_trace_reliable(trace, tsk);
-
-put_task_stack(tsk);
-
-return ret;
-}
-#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
-
 /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

 struct stack_frame_user {
@@ -189,15 +112,15 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
 return ret;
 }

-static inline void __save_stack_trace_user(struct stack_trace *trace)
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+const struct pt_regs *regs)
 {
-const struct pt_regs *regs = task_pt_regs(current);
 const void __user *fp = (const void __user *)regs->bp;

-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = regs->ip;
+if (!consume_entry(cookie, regs->ip, false))
+return;

-while (trace->nr_entries < trace->max_entries) {
+while (1) {
 struct stack_frame_user frame;

 frame.next_fp = NULL;
@@ -207,8 +130,8 @@ static inline void __save_stack_trace_user(struct stack_trace *trace)
 if ((unsigned long)fp < regs->sp)
 break;
 if (frame.ret_addr) {
-trace->entries[trace->nr_entries++] =
-frame.ret_addr;
+if (!consume_entry(cookie, frame.ret_addr, false))
+return;
 }
 if (fp == frame.next_fp)
 break;
@@ -216,14 +139,3 @@ static inline void __save_stack_trace_user(struct stack_trace *trace)
 }
 }

-void save_stack_trace_user(struct stack_trace *trace)
-{
-/*
-* Trace user stack if we are not a kernel thread
-*/
-if (current->mm) {
-__save_stack_trace_user(trace);
-}
-if (trace->nr_entries < trace->max_entries)
-trace->entries[trace->nr_entries++] = ULONG_MAX;
-}
@@ -106,25 +106,19 @@
 static noinline void save_stack(struct drm_mm_node *node)
 {
 unsigned long entries[STACKDEPTH];
-struct stack_trace trace = {
-.entries = entries,
-.max_entries = STACKDEPTH,
-.skip = 1
-};
+unsigned int n;

-save_stack_trace(&trace);
-if (trace.nr_entries != 0 &&
-trace.entries[trace.nr_entries-1] == ULONG_MAX)
-trace.nr_entries--;
-
+n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
 /* May be called under spinlock, so avoid sleeping */
-node->stack = depot_save_stack(&trace, GFP_NOWAIT);
+node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
 }

 static void show_leaks(struct drm_mm *mm)
 {
 struct drm_mm_node *node;
-unsigned long entries[STACKDEPTH];
+unsigned long *entries;
+unsigned int nr_entries;
 char *buf;

 buf = kmalloc(BUFSZ, GFP_KERNEL);
@@ -132,19 +126,14 @@ static void show_leaks(struct drm_mm *mm)
 return;

 list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
-struct stack_trace trace = {
-.entries = entries,
-.max_entries = STACKDEPTH
-};
-
 if (!node->stack) {
 DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
 node->start, node->size);
 continue;
 }

-depot_fetch_stack(node->stack, &trace);
-snprint_stack_trace(buf, BUFSZ, &trace, 0);
+nr_entries = stack_depot_fetch(node->stack, &entries);
+stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
 DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
 node->start, node->size, buf);
 }
@@ -36,11 +36,8 @@

 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
 {
-unsigned long entries[12];
-struct stack_trace trace = {
-.entries = entries,
-.max_entries = ARRAY_SIZE(entries),
-};
+unsigned long *entries;
+unsigned int nr_entries;
 char buf[512];

 if (!vma->node.stack) {
@@ -49,8 +46,8 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
 return;
 }

-depot_fetch_stack(vma->node.stack, &trace);
-snprint_stack_trace(buf, sizeof(buf), &trace, 0);
+nr_entries = stack_depot_fetch(vma->node.stack, &entries);
+stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
 vma->node.start, vma->node.size, reason, buf);
 }
@@ -60,31 +60,20 @@
 static noinline depot_stack_handle_t __save_depot_stack(void)
 {
 unsigned long entries[STACKDEPTH];
-struct stack_trace trace = {
-.entries = entries,
-.max_entries = ARRAY_SIZE(entries),
-.skip = 1,
-};
+unsigned int n;

-save_stack_trace(&trace);
-if (trace.nr_entries &&
-trace.entries[trace.nr_entries - 1] == ULONG_MAX)
-trace.nr_entries--;
-
-return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
+n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
 }

 static void __print_depot_stack(depot_stack_handle_t stack,
 char *buf, int sz, int indent)
 {
-unsigned long entries[STACKDEPTH];
-struct stack_trace trace = {
-.entries = entries,
-.max_entries = ARRAY_SIZE(entries),
-};
+unsigned long *entries;
+unsigned int nr_entries;

-depot_fetch_stack(stack, &trace);
-snprint_stack_trace(buf, sz, &trace, indent);
+nr_entries = stack_depot_fetch(stack, &entries);
+stack_trace_snprint(buf, sz, entries, nr_entries, indent);
 }

 static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
@@ -150,7 +150,7 @@ struct dm_buffer {
 void (*end_io)(struct dm_buffer *, blk_status_t);
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 #define MAX_STACK 10
-struct stack_trace stack_trace;
+unsigned int stack_len;
 unsigned long stack_entries[MAX_STACK];
 #endif
 };
@@ -232,11 +232,7 @@ static DEFINE_MUTEX(dm_bufio_clients_lock);
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 static void buffer_record_stack(struct dm_buffer *b)
 {
-b->stack_trace.nr_entries = 0;
-b->stack_trace.max_entries = MAX_STACK;
-b->stack_trace.entries = b->stack_entries;
-b->stack_trace.skip = 2;
-save_stack_trace(&b->stack_trace);
+b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
 }
 #endif

@@ -438,7 +434,7 @@ static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
 adjust_total_allocated(b->data_mode, (long)c->block_size);

 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
-memset(&b->stack_trace, 0, sizeof(b->stack_trace));
+b->stack_len = 0;
 #endif
 return b;
 }
@@ -1520,8 +1516,9 @@ static void drop_buffers(struct dm_bufio_client *c)
 DMERR("leaked buffer %llx, hold count %u, list %d",
 (unsigned long long)b->block, b->hold_count, i);
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
-print_stack_trace(&b->stack_trace, 1);
-b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
+stack_trace_print(b->stack_entries, b->stack_len, 1);
+/* mark unclaimed to avoid BUG_ON below */
+b->hold_count = 0;
 #endif
 }

@@ -35,7 +35,10 @@
 #define MAX_HOLDERS 4
 #define MAX_STACK 10

-typedef unsigned long stack_entries[MAX_STACK];
+struct stack_store {
+unsigned int nr_entries;
+unsigned long entries[MAX_STACK];
+};

 struct block_lock {
 spinlock_t lock;
@@ -44,8 +47,7 @@ struct block_lock {
 struct task_struct *holders[MAX_HOLDERS];

 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
-struct stack_trace traces[MAX_HOLDERS];
-stack_entries entries[MAX_HOLDERS];
+struct stack_store traces[MAX_HOLDERS];
 #endif
 };

@@ -73,7 +75,7 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
 {
 unsigned h = __find_holder(lock, NULL);
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
-struct stack_trace *t;
+struct stack_store *t;
 #endif

 get_task_struct(task);
@@ -81,11 +83,7 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)

 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 t = lock->traces + h;
-t->nr_entries = 0;
-t->max_entries = MAX_STACK;
-t->entries = lock->entries[h];
-t->skip = 2;
-save_stack_trace(t);
+t->nr_entries = stack_trace_save(t->entries, MAX_STACK, 2);
 #endif
 }

@@ -106,7 +104,8 @@ static int __check_holder(struct block_lock *lock)
 DMERR("recursive lock detected in metadata");
 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
 DMERR("previously held here:");
-print_stack_trace(lock->traces + i, 4);
+stack_trace_print(lock->traces[i].entries,
+lock->traces[i].nr_entries, 4);

 DMERR("subsequent acquisition attempted here:");
 dump_stack();
@@ -205,28 +205,17 @@ static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
 #ifdef CONFIG_STACKTRACE
 static void __save_stack_trace(struct ref_action *ra)
 {
-struct stack_trace stack_trace;
-
-stack_trace.max_entries = MAX_TRACE;
-stack_trace.nr_entries = 0;
-stack_trace.entries = ra->trace;
-stack_trace.skip = 2;
-save_stack_trace(&stack_trace);
-ra->trace_len = stack_trace.nr_entries;
+ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2);
 }

 static void __print_stack_trace(struct btrfs_fs_info *fs_info,
 struct ref_action *ra)
 {
-struct stack_trace trace;
-
 if (ra->trace_len == 0) {
 btrfs_err(fs_info, " ref-verify: no stacktrace");
 return;
 }
-trace.nr_entries = ra->trace_len;
-trace.entries = ra->trace;
-print_stack_trace(&trace, 2);
+stack_trace_print(ra->trace, ra->trace_len, 2);
 }
 #else
 static void inline __save_stack_trace(struct ref_action *ra)
@@ -407,7 +407,6 @@ static void unlock_trace(struct task_struct *task)
 static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
 struct pid *pid, struct task_struct *task)
 {
-struct stack_trace trace;
 unsigned long *entries;
 int err;

@@ -430,20 +429,17 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
 if (!entries)
 return -ENOMEM;

-trace.nr_entries = 0;
-trace.max_entries = MAX_STACK_TRACE_DEPTH;
-trace.entries = entries;
-trace.skip = 0;
-
 err = lock_trace(task);
 if (!err) {
-unsigned int i;
+unsigned int i, nr_entries;

-save_stack_trace_tsk(task, &trace);
+nr_entries = stack_trace_save_tsk(task, entries,
+MAX_STACK_TRACE_DEPTH, 0);

-for (i = 0; i < trace.nr_entries; i++) {
+for (i = 0; i < nr_entries; i++) {
 seq_printf(m, "[<0>] %pB\n", (void *)entries[i]);
 }

 unlock_trace(task);
 }
 kfree(entries);
@@ -489,10 +485,9 @@ static int lstats_show_proc(struct seq_file *m, void *v)
 lr->count, lr->time, lr->max);
 for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
 unsigned long bt = lr->backtrace[q];

 if (!bt)
 break;
-if (bt == ULONG_MAX)
-break;
 seq_printf(m, " %ps", (void *)bt);
 }
 seq_putc(m, '\n');
@@ -241,21 +241,11 @@ static inline void ftrace_free_mem(struct module *mod, void *start, void *end) {

 #ifdef CONFIG_STACK_TRACER

-#define STACK_TRACE_ENTRIES 500
-
-struct stack_trace;
-
-extern unsigned stack_trace_index[];
-extern struct stack_trace stack_trace_max;
-extern unsigned long stack_trace_max_size;
-extern arch_spinlock_t stack_trace_max_lock;
-
 extern int stack_tracer_enabled;
-void stack_trace_print(void);
-int
-stack_trace_sysctl(struct ctl_table *table, int write,
-void __user *buffer, size_t *lenp,
-loff_t *ppos);
+int stack_trace_sysctl(struct ctl_table *table, int write,
+void __user *buffer, size_t *lenp,
+loff_t *ppos);

 /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
 DECLARE_PER_CPU(int, disable_stack_tracer);
@@ -66,6 +66,11 @@ struct lock_class_key {

 extern struct lock_class_key __lockdep_no_validate__;

+struct lock_trace {
+unsigned int nr_entries;
+unsigned int offset;
+};
+
 #define LOCKSTAT_POINTS 4

 /*
@@ -100,7 +105,7 @@ struct lock_class {
 * IRQ/softirq usage tracking bits:
 */
 unsigned long usage_mask;
-struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
+struct lock_trace usage_traces[XXX_LOCK_USAGE_STATES];

 /*
 * Generation counter, when doing certain classes of graph walking,
@@ -188,7 +193,7 @@ struct lock_list {
 struct list_head entry;
 struct lock_class *class;
 struct lock_class *links_to;
-struct stack_trace trace;
+struct lock_trace trace;
 int distance;

 /*
@@ -23,10 +23,10 @@

 typedef u32 depot_stack_handle_t;

-struct stack_trace;
+depot_stack_handle_t stack_depot_save(unsigned long *entries,
+unsigned int nr_entries, gfp_t gfp_flags);

-depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
-void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+unsigned long **entries);

 #endif
@@ -3,11 +3,64 @@
 #define __LINUX_STACKTRACE_H

 #include <linux/types.h>
+#include <asm/errno.h>

 struct task_struct;
 struct pt_regs;

 #ifdef CONFIG_STACKTRACE
+void stack_trace_print(unsigned long *trace, unsigned int nr_entries,
+int spaces);
+int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
+unsigned int nr_entries, int spaces);
+unsigned int stack_trace_save(unsigned long *store, unsigned int size,
+unsigned int skipnr);
+unsigned int stack_trace_save_tsk(struct task_struct *task,
+unsigned long *store, unsigned int size,
+unsigned int skipnr);
+unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
+unsigned int size, unsigned int skipnr);
+unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
+
+/* Internal interfaces. Do not use in generic code */
+#ifdef CONFIG_ARCH_STACKWALK
+
+/**
+* stack_trace_consume_fn - Callback for arch_stack_walk()
+* @cookie: Caller supplied pointer handed back by arch_stack_walk()
+* @addr: The stack entry address to consume
+* @reliable: True when the stack entry is reliable. Required by
+* some printk based consumers.
+*
+* Return: True, if the entry was consumed or skipped
+* False, if there is no space left to store
+*/
+typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr,
+bool reliable);
+/**
+* arch_stack_walk - Architecture specific function to walk the stack
+* @consume_entry: Callback which is invoked by the architecture code for
+* each entry.
+* @cookie: Caller supplied pointer which is handed back to
+* @consume_entry
+* @task: Pointer to a task struct, can be NULL
+* @regs: Pointer to registers, can be NULL
+*
+* ============ ======= ============================================
+* task regs
+* ============ ======= ============================================
+* task NULL Stack trace from task (can be current)
+* current regs Stack trace starting on regs->stackpointer
+* ============ ======= ============================================
+*/
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+struct task_struct *task, struct pt_regs *regs);
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie,
+struct task_struct *task);
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+const struct pt_regs *regs);
+
+#else /* CONFIG_ARCH_STACKWALK */
 struct stack_trace {
 unsigned int nr_entries, max_entries;
 unsigned long *entries;
@@ -21,24 +74,20 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
 struct stack_trace *trace);
 extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
 struct stack_trace *trace);

-extern void print_stack_trace(struct stack_trace *trace, int spaces);
-extern int snprint_stack_trace(char *buf, size_t size,
-struct stack_trace *trace, int spaces);
-
-#ifdef CONFIG_USER_STACKTRACE_SUPPORT
 extern void save_stack_trace_user(struct stack_trace *trace);
-#else
-# define save_stack_trace_user(trace) do { } while (0)
-#endif
-
-#else /* !CONFIG_STACKTRACE */
-# define save_stack_trace(trace) do { } while (0)
-# define save_stack_trace_tsk(tsk, trace) do { } while (0)
-# define save_stack_trace_user(trace) do { } while (0)
-# define print_stack_trace(trace, spaces) do { } while (0)
-# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
-# define save_stack_trace_tsk_reliable(tsk, trace) ({ -ENOSYS; })
+#endif /* !CONFIG_ARCH_STACKWALK */
 #endif /* CONFIG_STACKTRACE */

+#if defined(CONFIG_STACKTRACE) && defined(CONFIG_HAVE_RELIABLE_STACKTRACE)
+int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
+unsigned int size);
+#else
+static inline int stack_trace_save_tsk_reliable(struct task_struct *tsk,
+unsigned long *store,
+unsigned int size)
+{
+return -ENOSYS;
+}
+#endif
+
 #endif /* __LINUX_STACKTRACE_H */
@@ -48,19 +48,14 @@ static void backtrace_test_irq(void)
 #ifdef CONFIG_STACKTRACE
 static void backtrace_test_saved(void)
 {
-struct stack_trace trace;
 unsigned long entries[8];
+unsigned int nr_entries;

 pr_info("Testing a saved backtrace.\n");
 pr_info("The following trace is a kernel self test and not a bug!\n");

-trace.nr_entries = 0;
-trace.max_entries = ARRAY_SIZE(entries);
-trace.entries = entries;
-trace.skip = 0;
-
-save_stack_trace(&trace);
-print_stack_trace(&trace, 0);
+nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+stack_trace_print(entries, nr_entries, 0);
 }
 #else
 static void backtrace_test_saved(void)
@@ -89,8 +89,8 @@ struct dma_debug_entry {
 int sg_mapped_ents;
 enum map_err_types map_err_type;
 #ifdef CONFIG_STACKTRACE
-struct stack_trace stacktrace;
-unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
+unsigned int stack_len;
+unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
 #endif
 };

@@ -174,7 +174,7 @@ static inline void dump_entry_trace(struct dma_debug_entry *entry)
 #ifdef CONFIG_STACKTRACE
 if (entry) {
 pr_warning("Mapped at:\n");
-print_stack_trace(&entry->stacktrace, 0);
+stack_trace_print(entry->stack_entries, entry->stack_len, 0);
 }
 #endif
 }
@@ -704,12 +704,10 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 spin_unlock_irqrestore(&free_entries_lock, flags);

 #ifdef CONFIG_STACKTRACE
-entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
-entry->stacktrace.entries = entry->st_entries;
-entry->stacktrace.skip = 1;
-save_stack_trace(&entry->stacktrace);
+entry->stack_len = stack_trace_save(entry->stack_entries,
+ARRAY_SIZE(entry->stack_entries),
+1);
 #endif

 return entry;
 }

@@ -120,8 +120,8 @@ account_global_scheduler_latency(struct task_struct *tsk,
 break;
 }

-/* 0 and ULONG_MAX entries mean end of backtrace: */
-if (record == 0 || record == ULONG_MAX)
+/* 0 entry marks end of backtrace: */
+if (!record)
 break;
 }
 if (same) {
@@ -141,20 +141,6 @@ account_global_scheduler_latency(struct task_struct *tsk,
 memcpy(&latency_record[i], lat, sizeof(struct latency_record));
 }

-/*
-* Iterator to store a backtrace into a latency record entry
-*/
-static inline void store_stacktrace(struct task_struct *tsk,
-struct latency_record *lat)
-{
-struct stack_trace trace;
-
-memset(&trace, 0, sizeof(trace));
-trace.max_entries = LT_BACKTRACEDEPTH;
-trace.entries = &lat->backtrace[0];
-save_stack_trace_tsk(tsk, &trace);
-}
-
 /**
 * __account_scheduler_latency - record an occurred latency
 * @tsk - the task struct of the task hitting the latency
@@ -191,7 +177,8 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 lat.count = 1;
 lat.time = usecs;
 lat.max = usecs;
-store_stacktrace(tsk, &lat);
+stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);

 raw_spin_lock_irqsave(&latency_lock, flags);

@@ -210,8 +197,8 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 break;
 }

-/* 0 and ULONG_MAX entries mean end of backtrace: */
-if (record == 0 || record == ULONG_MAX)
+/* 0 entry is end of backtrace */
+if (!record)
 break;
 }
 if (same) {
@@ -252,10 +239,10 @@ static int lstats_show(struct seq_file *m, void *v)
 lr->count, lr->time, lr->max);
 for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
 unsigned long bt = lr->backtrace[q];

 if (!bt)
 break;
-if (bt == ULONG_MAX)
-break;
 seq_printf(m, " %ps", (void *)bt);
 }
 seq_puts(m, "\n");
@@ -202,15 +202,15 @@ void klp_update_patch_state(struct task_struct *task)
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
-static int klp_check_stack_func(struct klp_func *func,
-struct stack_trace *trace)
+static int klp_check_stack_func(struct klp_func *func, unsigned long *entries,
+unsigned int nr_entries)
 {
 unsigned long func_addr, func_size, address;
 struct klp_ops *ops;
 int i;

-for (i = 0; i < trace->nr_entries; i++) {
-address = trace->entries[i];
+for (i = 0; i < nr_entries; i++) {
+address = entries[i];

 if (klp_target_state == KLP_UNPATCHED) {
 /*
@@ -254,29 +254,25 @@ static int klp_check_stack_func(struct klp_func *func,
 static int klp_check_stack(struct task_struct *task, char *err_buf)
 {
 static unsigned long entries[MAX_STACK_ENTRIES];
-struct stack_trace trace;
 struct klp_object *obj;
 struct klp_func *func;
-int ret;
+int ret, nr_entries;

-trace.skip = 0;
-trace.nr_entries = 0;
-trace.max_entries = MAX_STACK_ENTRIES;
-trace.entries = entries;
-ret = save_stack_trace_tsk_reliable(task, &trace);
+ret = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
 WARN_ON_ONCE(ret == -ENOSYS);
-if (ret) {
+if (ret < 0) {
 snprintf(err_buf, STACK_ERR_BUF_SIZE,
 "%s: %s:%d has an unreliable stack\n",
 __func__, task->comm, task->pid);
 return ret;
 }
+nr_entries = ret;

 klp_for_each_object(klp_transition_patch, obj) {
 if (!obj->patched)
 continue;
 klp_for_each_func(obj, func) {
-ret = klp_check_stack_func(func, &trace);
+ret = klp_check_stack_func(func, entries, nr_entries);
 if (ret) {
 snprintf(err_buf, STACK_ERR_BUF_SIZE,
 "%s: %s:%d is sleeping on function %s\n",
@ -434,29 +434,14 @@ static void print_lockdep_off(const char *bug_msg)
|
|||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
static int save_trace(struct stack_trace *trace)
|
static int save_trace(struct lock_trace *trace)
|
||||||
{
|
{
|
||||||
trace->nr_entries = 0;
|
unsigned long *entries = stack_trace + nr_stack_trace_entries;
|
||||||
trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
|
unsigned int max_entries;
|
||||||
trace->entries = stack_trace + nr_stack_trace_entries;
|
|
||||||
|
|
||||||
trace->skip = 3;
|
|
||||||
|
|
||||||
save_stack_trace(trace);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Some daft arches put -1 at the end to indicate its a full trace.
|
|
||||||
*
|
|
||||||
* <rant> this is buggy anyway, since it takes a whole extra entry so a
|
|
||||||
* complete trace that maxes out the entries provided will be reported
|
|
||||||
* as incomplete, friggin useless </rant>
|
|
||||||
*/
|
|
||||||
if (trace->nr_entries != 0 &&
|
|
||||||
trace->entries[trace->nr_entries-1] == ULONG_MAX)
|
|
||||||
trace->nr_entries--;
|
|
||||||
|
|
||||||
trace->max_entries = trace->nr_entries;
|
|
||||||
|
|
||||||
|
trace->offset = nr_stack_trace_entries;
|
||||||
|
max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
|
||||||
|
trace->nr_entries = stack_trace_save(entries, max_entries, 3);
|
||||||
nr_stack_trace_entries += trace->nr_entries;
|
nr_stack_trace_entries += trace->nr_entries;
|
||||||
|
|
||||||
if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
|
if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
|
||||||
@ -1207,7 +1192,7 @@ static struct lock_list *alloc_list_entry(void)
|
|||||||
static int add_lock_to_list(struct lock_class *this,
|
static int add_lock_to_list(struct lock_class *this,
|
||||||
struct lock_class *links_to, struct list_head *head,
|
struct lock_class *links_to, struct list_head *head,
|
||||||
unsigned long ip, int distance,
|
unsigned long ip, int distance,
|
||||||
struct stack_trace *trace)
|
struct lock_trace *trace)
|
||||||
{
|
{
|
||||||
struct lock_list *entry;
|
struct lock_list *entry;
|
||||||
/*
|
/*
|
||||||
@ -1426,6 +1411,13 @@ static inline int __bfs_backwards(struct lock_list *src_entry,
|
|||||||
* checking.
|
* checking.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
|
||||||
|
{
|
||||||
|
unsigned long *entries = stack_trace + trace->offset;
|
||||||
|
|
||||||
|
stack_trace_print(entries, trace->nr_entries, spaces);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Print a dependency chain entry (this is only done when a deadlock
|
* Print a dependency chain entry (this is only done when a deadlock
|
||||||
* has been detected):
|
* has been detected):
|
||||||
@ -1438,8 +1430,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
|
|||||||
printk("\n-> #%u", depth);
|
printk("\n-> #%u", depth);
|
||||||
print_lock_name(target->class);
|
print_lock_name(target->class);
|
||||||
printk(KERN_CONT ":\n");
|
printk(KERN_CONT ":\n");
|
||||||
print_stack_trace(&target->trace, 6);
|
print_lock_trace(&target->trace, 6);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1533,10 +1524,9 @@ static inline int class_equal(struct lock_list *entry, void *data)
 }
 
 static noinline int print_circular_bug(struct lock_list *this,
 				       struct lock_list *target,
 				       struct held_lock *check_src,
-				       struct held_lock *check_tgt,
-				       struct stack_trace *trace)
+				       struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
 	struct lock_list *parent;
@@ -1752,7 +1742,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
 			len += printk("%*s %s", depth, "", usage_str[bit]);
 			len += printk(KERN_CONT " at:\n");
-			print_stack_trace(class->usage_traces + bit, len);
+			print_lock_trace(class->usage_traces + bit, len);
 		}
 	}
 	printk("%*s }\n", depth, "");
@@ -1777,7 +1767,7 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
 	do {
 		print_lock_class_header(entry->class, depth);
 		printk("%*s ... acquired at:\n", depth, "");
-		print_stack_trace(&entry->trace, 2);
+		print_lock_trace(&entry->trace, 2);
 		printk("\n");
 
 		if (depth == 0 && (entry != root)) {
@@ -1890,14 +1880,14 @@ print_bad_irq_dependency(struct task_struct *curr,
 	print_lock_name(backwards_entry->class);
 	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
-	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
+	print_lock_trace(backwards_entry->class->usage_traces + bit1, 1);
 
 	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
 	print_lock_name(forwards_entry->class);
 	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
 	pr_warn("...");
 
-	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
+	print_lock_trace(forwards_entry->class->usage_traces + bit2, 1);
 
 	pr_warn("\nother info that might help us debug this:\n\n");
 	print_irq_lock_scenario(backwards_entry, forwards_entry,
@@ -2170,8 +2160,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, struct stack_trace *trace,
-	       int (*save)(struct stack_trace *trace))
+	       struct held_lock *next, int distance, struct lock_trace *trace)
 {
 	struct lock_list *uninitialized_var(target_entry);
 	struct lock_list *entry;
@@ -2209,15 +2198,15 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	this.parent = NULL;
 	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
 	if (unlikely(!ret)) {
-		if (!trace->entries) {
+		if (!trace->nr_entries) {
 			/*
-			 * If @save fails here, the printing might trigger
-			 * a WARN but because of the !nr_entries it should
-			 * not do bad things.
+			 * If save_trace fails here, the printing might
+			 * trigger a WARN but because of the !nr_entries it
+			 * should not do bad things.
 			 */
-			save(trace);
+			save_trace(trace);
 		}
-		return print_circular_bug(&this, target_entry, next, prev, trace);
+		return print_circular_bug(&this, target_entry, next, prev);
 	}
 	else if (unlikely(ret < 0))
 		return print_bfs_bug(ret);
@@ -2265,7 +2254,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		return print_bfs_bug(ret);
 
 
-	if (!trace->entries && !save(trace))
+	if (!trace->nr_entries && !save_trace(trace))
 		return 0;
 
 	/*
@@ -2297,14 +2286,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
+	struct lock_trace trace = { .nr_entries = 0 };
 	int depth = curr->lockdep_depth;
 	struct held_lock *hlock;
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.max_entries = 0,
-		.entries = NULL,
-		.skip = 0,
-	};
 
 	/*
 	 * Debugging checks.
@@ -2330,7 +2314,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 * added:
 		 */
 		if (hlock->read != 2 && hlock->check) {
-			int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace);
+			int ret = check_prev_add(curr, hlock, next, distance,
+						 &trace);
 			if (!ret)
 				return 0;
 
@@ -2731,6 +2716,10 @@ static inline int validate_chain(struct task_struct *curr,
 {
 	return 1;
 }
+
+static void print_lock_trace(struct lock_trace *trace, unsigned int spaces)
+{
+}
 #endif
 
 /*
@@ -2827,7 +2816,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	print_lock(this);
 
 	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
-	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
+	print_lock_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
 	pr_warn("\nother info that might help us debug this:\n");
@@ -5,41 +5,56 @@
  *
  * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
+#include <linux/sched/task_stack.h>
+#include <linux/sched/debug.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/kallsyms.h>
 #include <linux/stacktrace.h>
 
-void print_stack_trace(struct stack_trace *trace, int spaces)
+/**
+ * stack_trace_print - Print the entries in the stack trace
+ * @entries:	Pointer to storage array
+ * @nr_entries:	Number of entries in the storage array
+ * @spaces:	Number of leading spaces to print
+ */
+void stack_trace_print(unsigned long *entries, unsigned int nr_entries,
+		       int spaces)
 {
-	int i;
+	unsigned int i;
 
-	if (WARN_ON(!trace->entries))
+	if (WARN_ON(!entries))
 		return;
 
-	for (i = 0; i < trace->nr_entries; i++)
-		printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
+	for (i = 0; i < nr_entries; i++)
+		printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]);
 }
-EXPORT_SYMBOL_GPL(print_stack_trace);
+EXPORT_SYMBOL_GPL(stack_trace_print);
 
-int snprint_stack_trace(char *buf, size_t size,
-			struct stack_trace *trace, int spaces)
+/**
+ * stack_trace_snprint - Print the entries in the stack trace into a buffer
+ * @buf:	Pointer to the print buffer
+ * @size:	Size of the print buffer
+ * @entries:	Pointer to storage array
+ * @nr_entries:	Number of entries in the storage array
+ * @spaces:	Number of leading spaces to print
+ *
+ * Return: Number of bytes printed.
+ */
+int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
+			unsigned int nr_entries, int spaces)
 {
-	int i;
-	int generated;
-	int total = 0;
+	unsigned int generated, i, total = 0;
 
-	if (WARN_ON(!trace->entries))
+	if (WARN_ON(!entries))
 		return 0;
 
-	for (i = 0; i < trace->nr_entries; i++) {
+	for (i = 0; i < nr_entries && size; i++) {
 		generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
-				     (void *)trace->entries[i]);
+				     (void *)entries[i]);
 
 		total += generated;
 
-		/* Assume that generated isn't a negative number */
 		if (generated >= size) {
 			buf += size;
 			size = 0;
@@ -51,7 +66,176 @@ int snprint_stack_trace(char *buf, size_t size,
 
 	return total;
 }
-EXPORT_SYMBOL_GPL(snprint_stack_trace);
+EXPORT_SYMBOL_GPL(stack_trace_snprint);
+
+#ifdef CONFIG_ARCH_STACKWALK
+
+struct stacktrace_cookie {
+	unsigned long	*store;
+	unsigned int	size;
+	unsigned int	skip;
+	unsigned int	len;
+};
+
+static bool stack_trace_consume_entry(void *cookie, unsigned long addr,
+				      bool reliable)
+{
+	struct stacktrace_cookie *c = cookie;
+
+	if (c->len >= c->size)
+		return false;
+
+	if (c->skip > 0) {
+		c->skip--;
+		return true;
+	}
+	c->store[c->len++] = addr;
+	return c->len < c->size;
+}
+
+static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr,
+					      bool reliable)
+{
+	if (in_sched_functions(addr))
+		return true;
+	return stack_trace_consume_entry(cookie, addr, reliable);
+}
+
+/**
+ * stack_trace_save - Save a stack trace into a storage array
+ * @store:	Pointer to storage array
+ * @size:	Size of the storage array
+ * @skipnr:	Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored.
+ */
+unsigned int stack_trace_save(unsigned long *store, unsigned int size,
+			      unsigned int skipnr)
+{
+	stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
+	struct stacktrace_cookie c = {
+		.store	= store,
+		.size	= size,
+		.skip	= skipnr + 1,
+	};
+
+	arch_stack_walk(consume_entry, &c, current, NULL);
+	return c.len;
+}
+EXPORT_SYMBOL_GPL(stack_trace_save);
+
+/**
+ * stack_trace_save_tsk - Save a task stack trace into a storage array
+ * @task:	The task to examine
+ * @store:	Pointer to storage array
+ * @size:	Size of the storage array
+ * @skipnr:	Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored.
+ */
+unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
+				  unsigned int size, unsigned int skipnr)
+{
+	stack_trace_consume_fn consume_entry = stack_trace_consume_entry_nosched;
+	struct stacktrace_cookie c = {
+		.store	= store,
+		.size	= size,
+		.skip	= skipnr + 1,
+	};
+
+	if (!try_get_task_stack(tsk))
+		return 0;
+
+	arch_stack_walk(consume_entry, &c, tsk, NULL);
+	put_task_stack(tsk);
+	return c.len;
+}
+
+/**
+ * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
+ * @regs:	Pointer to pt_regs to examine
+ * @store:	Pointer to storage array
+ * @size:	Size of the storage array
+ * @skipnr:	Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored.
+ */
+unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
+				   unsigned int size, unsigned int skipnr)
+{
+	stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
+	struct stacktrace_cookie c = {
+		.store	= store,
+		.size	= size,
+		.skip	= skipnr,
+	};
+
+	arch_stack_walk(consume_entry, &c, current, regs);
+	return c.len;
+}
+
+#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
+/**
+ * stack_trace_save_tsk_reliable - Save task stack with verification
+ * @tsk:	Pointer to the task to examine
+ * @store:	Pointer to storage array
+ * @size:	Size of the storage array
+ *
+ * Return:	An error if it detects any unreliable features of the
+ *		stack. Otherwise it guarantees that the stack trace is
+ *		reliable and returns the number of entries stored.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
+ */
+int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
+				  unsigned int size)
+{
+	stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
+	struct stacktrace_cookie c = {
+		.store	= store,
+		.size	= size,
+	};
+	int ret;
+
+	/*
+	 * If the task doesn't have a stack (e.g., a zombie), the stack is
+	 * "reliably" empty.
+	 */
+	if (!try_get_task_stack(tsk))
+		return 0;
+
+	ret = arch_stack_walk_reliable(consume_entry, &c, tsk);
+	put_task_stack(tsk);
+	return ret;
+}
+#endif
+
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
+/**
+ * stack_trace_save_user - Save a user space stack trace into a storage array
+ * @store:	Pointer to storage array
+ * @size:	Size of the storage array
+ *
+ * Return: Number of trace entries stored.
+ */
+unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
+{
+	stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
+	struct stacktrace_cookie c = {
+		.store	= store,
+		.size	= size,
+	};
+
+	/* Trace user stack if not a kernel thread */
+	if (!current->mm)
+		return 0;
+
+	arch_stack_walk_user(consume_entry, &c, task_pt_regs(current));
+	return c.len;
+}
+#endif
+
+#else /* CONFIG_ARCH_STACKWALK */
 
 /*
  * Architectures that do not implement save_stack_trace_*()
@@ -77,3 +261,118 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk,
 	WARN_ONCE(1, KERN_INFO "save_stack_tsk_reliable() not implemented yet.\n");
 	return -ENOSYS;
 }
+
+/**
+ * stack_trace_save - Save a stack trace into a storage array
+ * @store:	Pointer to storage array
+ * @size:	Size of the storage array
+ * @skipnr:	Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored
+ */
+unsigned int stack_trace_save(unsigned long *store, unsigned int size,
+			      unsigned int skipnr)
+{
+	struct stack_trace trace = {
+		.entries	= store,
+		.max_entries	= size,
+		.skip		= skipnr + 1,
+	};
+
+	save_stack_trace(&trace);
+	return trace.nr_entries;
+}
+EXPORT_SYMBOL_GPL(stack_trace_save);
+
+/**
+ * stack_trace_save_tsk - Save a task stack trace into a storage array
+ * @task:	The task to examine
+ * @store:	Pointer to storage array
+ * @size:	Size of the storage array
+ * @skipnr:	Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored
+ */
+unsigned int stack_trace_save_tsk(struct task_struct *task,
+				  unsigned long *store, unsigned int size,
+				  unsigned int skipnr)
+{
+	struct stack_trace trace = {
+		.entries	= store,
+		.max_entries	= size,
+		.skip		= skipnr + 1,
+	};
+
+	save_stack_trace_tsk(task, &trace);
+	return trace.nr_entries;
+}
+
+/**
+ * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
+ * @regs:	Pointer to pt_regs to examine
+ * @store:	Pointer to storage array
+ * @size:	Size of the storage array
+ * @skipnr:	Number of entries to skip at the start of the stack trace
+ *
+ * Return: Number of trace entries stored
+ */
+unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
+				   unsigned int size, unsigned int skipnr)
+{
+	struct stack_trace trace = {
+		.entries	= store,
+		.max_entries	= size,
+		.skip		= skipnr,
+	};
+
+	save_stack_trace_regs(regs, &trace);
+	return trace.nr_entries;
+}
+
+#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
+/**
+ * stack_trace_save_tsk_reliable - Save task stack with verification
+ * @tsk:	Pointer to the task to examine
+ * @store:	Pointer to storage array
+ * @size:	Size of the storage array
+ *
+ * Return:	An error if it detects any unreliable features of the
+ *		stack. Otherwise it guarantees that the stack trace is
+ *		reliable and returns the number of entries stored.
+ *
+ * If the task is not 'current', the caller *must* ensure the task is inactive.
+ */
+int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
+				  unsigned int size)
+{
+	struct stack_trace trace = {
+		.entries	= store,
+		.max_entries	= size,
+	};
+	int ret = save_stack_trace_tsk_reliable(tsk, &trace);
+
+	return ret ? ret : trace.nr_entries;
+}
+#endif
+
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
+/**
+ * stack_trace_save_user - Save a user space stack trace into a storage array
+ * @store:	Pointer to storage array
+ * @size:	Size of the storage array
+ *
+ * Return: Number of trace entries stored
+ */
+unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
+{
+	struct stack_trace trace = {
+		.entries	= store,
+		.max_entries	= size,
+	};
+
+	save_stack_trace_user(&trace);
+	return trace.nr_entries;
+}
+#endif /* CONFIG_USER_STACKTRACE_SUPPORT */
+
+#endif /* !CONFIG_ARCH_STACKWALK */
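For reference, the call sites converted in the rest of this series all reduce to the same pattern against the new array-based interface shown above. A minimal, illustrative caller (the buffer size and skip count here are arbitrary, not taken from the series):

	/* Illustrative only: save the current kernel stack and print it */
	unsigned long entries[32];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	stack_trace_print(entries, nr_entries, 0);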
@@ -159,6 +159,8 @@ static union trace_eval_map_item *trace_eval_maps;
 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
 
 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
+static void ftrace_trace_userstack(struct ring_buffer *buffer,
+				   unsigned long flags, int pc);
 
 #define MAX_TRACER_SIZE		100
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
@@ -2752,12 +2754,21 @@ trace_function(struct trace_array *tr,
 
 #ifdef CONFIG_STACKTRACE
 
-#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
+/* Allow 4 levels of nesting: normal, softirq, irq, NMI */
+#define FTRACE_KSTACK_NESTING	4
+
+#define FTRACE_KSTACK_ENTRIES	(PAGE_SIZE / FTRACE_KSTACK_NESTING)
+
 struct ftrace_stack {
-	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
+	unsigned long		calls[FTRACE_KSTACK_ENTRIES];
 };
 
-static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
+
+struct ftrace_stacks {
+	struct ftrace_stack	stacks[FTRACE_KSTACK_NESTING];
+};
+
+static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
 
 static void __ftrace_trace_stack(struct ring_buffer *buffer,
@@ -2766,13 +2777,10 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 {
 	struct trace_event_call *call = &event_kernel_stack;
 	struct ring_buffer_event *event;
+	unsigned int size, nr_entries;
+	struct ftrace_stack *fstack;
 	struct stack_entry *entry;
-	struct stack_trace trace;
-	int use_stack;
-	int size = FTRACE_STACK_ENTRIES;
-
-	trace.nr_entries	= 0;
-	trace.skip		= skip;
+	int stackidx;
 
 	/*
 	 * Add one, for this function and the call to save_stack_trace()
@@ -2780,7 +2788,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	 */
 #ifndef CONFIG_UNWINDER_ORC
 	if (!regs)
-		trace.skip++;
+		skip++;
 #endif
 
 	/*
@@ -2791,53 +2799,40 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	 */
 	preempt_disable_notrace();
 
-	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
+	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
+
+	/* This should never happen. If it does, yell once and skip */
+	if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING))
+		goto out;
+
 	/*
-	 * We don't need any atomic variables, just a barrier.
-	 * If an interrupt comes in, we don't care, because it would
-	 * have exited and put the counter back to what we want.
-	 * We just need a barrier to keep gcc from moving things
-	 * around.
+	 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
+	 * interrupt will either see the value pre increment or post
+	 * increment. If the interrupt happens pre increment it will have
+	 * restored the counter when it returns.  We just need a barrier to
+	 * keep gcc from moving things around.
 	 */
 	barrier();
-	if (use_stack == 1) {
-		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
-		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;
 
-		if (regs)
-			save_stack_trace_regs(regs, &trace);
-		else
-			save_stack_trace(&trace);
-
-		if (trace.nr_entries > size)
-			size = trace.nr_entries;
-	} else
-		/* From now on, use_stack is a boolean */
-		use_stack = 0;
+	fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
+	size = ARRAY_SIZE(fstack->calls);
 
-	size *= sizeof(unsigned long);
+	if (regs) {
+		nr_entries = stack_trace_save_regs(regs, fstack->calls,
+						   size, skip);
+	} else {
+		nr_entries = stack_trace_save(fstack->calls, size, skip);
+	}
 
+	size = nr_entries * sizeof(unsigned long);
 	event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
 					    sizeof(*entry) + size, flags, pc);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 
-	memset(&entry->caller, 0, size);
-
-	if (use_stack)
-		memcpy(&entry->caller, trace.entries,
-		       trace.nr_entries * sizeof(unsigned long));
-	else {
-		trace.max_entries	= FTRACE_STACK_ENTRIES;
-		trace.entries		= entry->caller;
-		if (regs)
-			save_stack_trace_regs(regs, &trace);
-		else
-			save_stack_trace(&trace);
-	}
-
-	entry->size = trace.nr_entries;
+	memcpy(&entry->caller, fstack->calls, size);
+	entry->size = nr_entries;
 
 	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
@@ -2907,15 +2902,15 @@ void trace_dump_stack(int skip)
 }
 EXPORT_SYMBOL_GPL(trace_dump_stack);
 
+#ifdef CONFIG_USER_STACKTRACE_SUPPORT
 static DEFINE_PER_CPU(int, user_stack_count);
 
-void
+static void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
 	struct trace_event_call *call = &event_user_stack;
 	struct ring_buffer_event *event;
 	struct userstack_entry *entry;
-	struct stack_trace trace;
 
 	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
@@ -2946,12 +2941,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	entry->tgid		= current->tgid;
 	memset(&entry->caller, 0, sizeof(entry->caller));
 
-	trace.nr_entries	= 0;
-	trace.max_entries	= FTRACE_STACK_ENTRIES;
-	trace.skip		= 0;
-	trace.entries		= entry->caller;
-
-	save_stack_trace_user(&trace);
+	stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
 	if (!call_filter_check_discard(call, entry, buffer, event))
 		__buffer_unlock_commit(buffer, event);
 
@@ -2960,13 +2950,12 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
  out:
 	preempt_enable();
 }
-
-#ifdef UNUSED
-static void __trace_userstack(struct trace_array *tr, unsigned long flags)
+#else /* CONFIG_USER_STACKTRACE_SUPPORT */
+static void ftrace_trace_userstack(struct ring_buffer *buffer,
+				   unsigned long flags, int pc)
 {
-	ftrace_trace_userstack(tr, flags, preempt_count());
 }
-#endif /* UNUSED */
+#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
 
 #endif /* CONFIG_STACKTRACE */
 
@@ -782,17 +782,9 @@ void update_max_tr_single(struct trace_array *tr,
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
 #ifdef CONFIG_STACKTRACE
-void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
-			    int pc);
-
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 		   int pc);
 #else
-static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
-					  unsigned long flags, int pc)
-{
-}
-
 static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
 				 int skip, int pc)
 {
@@ -5186,7 +5186,6 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
 	u64 var_ref_vals[TRACING_MAP_VARS_MAX];
 	char compound_key[HIST_KEY_SIZE_MAX];
 	struct tracing_map_elt *elt = NULL;
-	struct stack_trace stacktrace;
 	struct hist_field *key_field;
 	u64 field_contents;
 	void *key = NULL;
@@ -5198,14 +5197,9 @@ static void event_hist_trigger(struct event_trigger_data *data, void *rec,
 		key_field = hist_data->fields[i];
 
 		if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
-			stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
-			stacktrace.entries = entries;
-			stacktrace.nr_entries = 0;
-			stacktrace.skip = HIST_STACKTRACE_SKIP;
-
-			memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
-			save_stack_trace(&stacktrace);
+			memset(entries, 0, HIST_STACKTRACE_SIZE);
+			stack_trace_save(entries, HIST_STACKTRACE_DEPTH,
+					 HIST_STACKTRACE_SKIP);
 
 			key = entries;
 		} else {
 			field_contents = key_field->fn(key_field, elt, rbe, rec);
@@ -5246,7 +5240,7 @@ static void hist_trigger_stacktrace_print(struct seq_file *m,
 	unsigned int i;
 
 	for (i = 0; i < max_entries; i++) {
-		if (stacktrace_entries[i] == ULONG_MAX)
+		if (!stacktrace_entries[i])
 			return;
 
 		seq_printf(m, "%*c", 1 + spaces, ' ');
@@ -18,44 +18,32 @@
 
 #include "trace.h"
 
-static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
-	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
-unsigned stack_trace_index[STACK_TRACE_ENTRIES];
+#define STACK_TRACE_ENTRIES 500
 
-/*
- * Reserve one entry for the passed in ip. This will allow
- * us to remove most or all of the stack size overhead
- * added by the stack tracer itself.
- */
-struct stack_trace stack_trace_max = {
-	.max_entries		= STACK_TRACE_ENTRIES - 1,
-	.entries		= &stack_dump_trace[0],
-};
+static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
+static unsigned stack_trace_index[STACK_TRACE_ENTRIES];
 
-unsigned long stack_trace_max_size;
-arch_spinlock_t stack_trace_max_lock =
+static unsigned int stack_trace_nr_entries;
+static unsigned long stack_trace_max_size;
+static arch_spinlock_t stack_trace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 DEFINE_PER_CPU(int, disable_stack_tracer);
 static DEFINE_MUTEX(stack_sysctl_mutex);
 
 int stack_tracer_enabled;
-static int last_stack_tracer_enabled;
 
-void stack_trace_print(void)
+static void print_max_stack(void)
 {
 	long i;
 	int size;
 
 	pr_emerg("        Depth    Size   Location    (%d entries)\n"
 			   "        -----    ----   --------\n",
-			   stack_trace_max.nr_entries);
+			   stack_trace_nr_entries);
 
-	for (i = 0; i < stack_trace_max.nr_entries; i++) {
-		if (stack_dump_trace[i] == ULONG_MAX)
-			break;
-		if (i+1 == stack_trace_max.nr_entries ||
-		    stack_dump_trace[i+1] == ULONG_MAX)
+	for (i = 0; i < stack_trace_nr_entries; i++) {
+		if (i + 1 == stack_trace_nr_entries)
 			size = stack_trace_index[i];
 		else
 			size = stack_trace_index[i] - stack_trace_index[i+1];
@@ -65,16 +53,7 @@ void stack_trace_print(void)
 	}
 }
 
-/*
- * When arch-specific code overrides this function, the following
- * data should be filled up, assuming stack_trace_max_lock is held to
- * prevent concurrent updates.
- *     stack_trace_index[]
- *     stack_trace_max
- *     stack_trace_max_size
- */
-void __weak
-check_stack(unsigned long ip, unsigned long *stack)
+static void check_stack(unsigned long ip, unsigned long *stack)
 {
 	unsigned long this_size, flags; unsigned long *p, *top, *start;
 	static int tracer_frame;
@@ -110,13 +89,12 @@ check_stack(unsigned long ip, unsigned long *stack)
 
 	stack_trace_max_size = this_size;
 
-	stack_trace_max.nr_entries = 0;
-	stack_trace_max.skip = 0;
-
-	save_stack_trace(&stack_trace_max);
+	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
+					       ARRAY_SIZE(stack_dump_trace) - 1,
+					       0);
 
 	/* Skip over the overhead of the stack tracer itself */
-	for (i = 0; i < stack_trace_max.nr_entries; i++) {
+	for (i = 0; i < stack_trace_nr_entries; i++) {
 		if (stack_dump_trace[i] == ip)
 			break;
 	}
@@ -125,7 +103,7 @@ check_stack(unsigned long ip, unsigned long *stack)
 	 * Some archs may not have the passed in ip in the dump.
 	 * If that happens, we need to show everything.
 	 */
-	if (i == stack_trace_max.nr_entries)
+	if (i == stack_trace_nr_entries)
 		i = 0;
 
 	/*
@@ -143,15 +121,13 @@ check_stack(unsigned long ip, unsigned long *stack)
 	 * loop will only happen once. This code only takes place
 	 * on a new max, so it is far from a fast path.
 	 */
-	while (i < stack_trace_max.nr_entries) {
+	while (i < stack_trace_nr_entries) {
 		int found = 0;
 
 		stack_trace_index[x] = this_size;
 		p = start;
 
-		for (; p < top && i < stack_trace_max.nr_entries; p++) {
-			if (stack_dump_trace[i] == ULONG_MAX)
-				break;
+		for (; p < top && i < stack_trace_nr_entries; p++) {
 			/*
 			 * The READ_ONCE_NOCHECK is used to let KASAN know that
 			 * this is not a stack-out-of-bounds error.
@@ -182,12 +158,10 @@ check_stack(unsigned long ip, unsigned long *stack)
 			i++;
 	}
 
-	stack_trace_max.nr_entries = x;
-	for (; x < i; x++)
-		stack_dump_trace[x] = ULONG_MAX;
+	stack_trace_nr_entries = x;
 
 	if (task_stack_end_corrupted(current)) {
-		stack_trace_print();
+		print_max_stack();
 		BUG();
 	}
 
@@ -286,7 +260,7 @@ __next(struct seq_file *m, loff_t *pos)
 {
 	long n = *pos - 1;
 
-	if (n >= stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
+	if (n >= stack_trace_nr_entries)
 		return NULL;
 
 	m->private = (void *)n;
@@ -350,7 +324,7 @@ static int t_show(struct seq_file *m, void *v)
 		seq_printf(m, "        Depth    Size   Location"
 			   "    (%d entries)\n"
 			   "        -----    ----   --------\n",
-			   stack_trace_max.nr_entries);
+			   stack_trace_nr_entries);
 
 		if (!stack_tracer_enabled && !stack_trace_max_size)
 			print_disabled(m);
@@ -360,12 +334,10 @@ static int t_show(struct seq_file *m, void *v)
 
 	i = *(long *)v;
 
-	if (i >= stack_trace_max.nr_entries ||
-	    stack_dump_trace[i] == ULONG_MAX)
+	if (i >= stack_trace_nr_entries)
 		return 0;
 
-	if (i+1 == stack_trace_max.nr_entries ||
-	    stack_dump_trace[i+1] == ULONG_MAX)
+	if (i + 1 == stack_trace_nr_entries)
 		size = stack_trace_index[i];
 	else
 		size = stack_trace_index[i] - stack_trace_index[i+1];
@@ -422,23 +394,21 @@ stack_trace_sysctl(struct ctl_table *table, int write,
 		   void __user *buffer, size_t *lenp,
 		   loff_t *ppos)
 {
+	int was_enabled;
 	int ret;
 
 	mutex_lock(&stack_sysctl_mutex);
+	was_enabled = !!stack_tracer_enabled;
 
 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
-	if (ret || !write ||
-	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
+	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
 		goto out;
 
-	last_stack_tracer_enabled = !!stack_tracer_enabled;
-
 	if (stack_tracer_enabled)
 		register_ftrace_function(&trace_ops);
 	else
 		unregister_ftrace_function(&trace_ops);
-
  out:
 	mutex_unlock(&stack_sysctl_mutex);
 	return ret;
@@ -454,7 +424,6 @@ static __init int enable_stacktrace(char *str)
 		strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);
 
 	stack_tracer_enabled = 1;
-	last_stack_tracer_enabled = 1;
 	return 1;
 }
 __setup("stacktrace", enable_stacktrace);
@@ -597,6 +597,10 @@ config ARCH_HAS_UACCESS_FLUSHCACHE
 config ARCH_HAS_UACCESS_MCSAFE
 	bool
 
+# Temporary. Goes away when all archs are cleaned up
+config ARCH_STACKWALK
+	bool
+
 config STACKDEPOT
 	bool
 	select STACKTRACE
@@ -65,22 +65,16 @@ static bool fail_task(struct fault_attr *attr, struct task_struct *task)
 
 static bool fail_stacktrace(struct fault_attr *attr)
 {
-	struct stack_trace trace;
 	int depth = attr->stacktrace_depth;
 	unsigned long entries[MAX_STACK_TRACE_DEPTH];
-	int n;
+	int n, nr_entries;
 	bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX);
 
 	if (depth == 0)
 		return found;
 
-	trace.nr_entries = 0;
-	trace.entries = entries;
-	trace.max_entries = depth;
-	trace.skip = 1;
-
-	save_stack_trace(&trace);
-	for (n = 0; n < trace.nr_entries; n++) {
+	nr_entries = stack_trace_save(entries, depth, 1);
+	for (n = 0; n < nr_entries; n++) {
 		if (attr->reject_start <= entries[n] &&
 				entries[n] < attr->reject_end)
 			return false;
@@ -194,40 +194,52 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
 	return NULL;
 }
 
-void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
+/**
+ * stack_depot_fetch - Fetch stack entries from a depot
+ *
+ * @handle:		Stack depot handle which was returned from
+ *			stack_depot_save().
+ * @entries:		Pointer to store the entries address
+ *
+ * Return: The number of trace entries for this depot.
+ */
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+			       unsigned long **entries)
 {
 	union handle_parts parts = { .handle = handle };
 	void *slab = stack_slabs[parts.slabindex];
 	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
 	struct stack_record *stack = slab + offset;
 
-	trace->nr_entries = trace->max_entries = stack->size;
-	trace->entries = stack->entries;
-	trace->skip = 0;
+	*entries = stack->entries;
+	return stack->size;
 }
-EXPORT_SYMBOL_GPL(depot_fetch_stack);
+EXPORT_SYMBOL_GPL(stack_depot_fetch);
 
 /**
- * depot_save_stack - save stack in a stack depot.
- * @trace - the stacktrace to save.
- * @alloc_flags - flags for allocating additional memory if required.
+ * stack_depot_save - Save a stack trace from an array
  *
- * Returns the handle of the stack struct stored in depot.
+ * @entries:		Pointer to storage array
+ * @nr_entries:		Size of the storage array
+ * @alloc_flags:	Allocation gfp flags
+ *
+ * Return: The handle of the stack struct stored in depot
  */
-depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
-				      gfp_t alloc_flags)
+depot_stack_handle_t stack_depot_save(unsigned long *entries,
+				      unsigned int nr_entries,
+				      gfp_t alloc_flags)
 {
-	u32 hash;
-	depot_stack_handle_t retval = 0;
 	struct stack_record *found = NULL, **bucket;
-	unsigned long flags;
+	depot_stack_handle_t retval = 0;
 	struct page *page = NULL;
 	void *prealloc = NULL;
+	unsigned long flags;
+	u32 hash;
 
-	if (unlikely(trace->nr_entries == 0))
+	if (unlikely(nr_entries == 0))
 		goto fast_exit;
 
-	hash = hash_stack(trace->entries, trace->nr_entries);
+	hash = hash_stack(entries, nr_entries);
 	bucket = &stack_table[hash & STACK_HASH_MASK];
 
 	/*
@@ -235,8 +247,8 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
 	 * The smp_load_acquire() here pairs with smp_store_release() to
 	 * |bucket| below.
 	 */
-	found = find_stack(smp_load_acquire(bucket), trace->entries,
-			   trace->nr_entries, hash);
+	found = find_stack(smp_load_acquire(bucket), entries,
+			   nr_entries, hash);
 	if (found)
 		goto exit;
 
@@ -264,10 +276,10 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
 
 	spin_lock_irqsave(&depot_lock, flags);
 
-	found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
+	found = find_stack(*bucket, entries, nr_entries, hash);
 	if (!found) {
 		struct stack_record *new =
-			depot_alloc_stack(trace->entries, trace->nr_entries,
+			depot_alloc_stack(entries, nr_entries,
 					  hash, &prealloc, alloc_flags);
 		if (new) {
 			new->next = *bucket;
@@ -297,4 +309,4 @@ exit:
 fast_exit:
 	return retval;
 }
-EXPORT_SYMBOL_GPL(depot_save_stack);
+EXPORT_SYMBOL_GPL(stack_depot_save);
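The kasan and page_owner hunks that follow use this reworked depot interface in a save/fetch round trip. A hedged sketch of that pattern, using only the functions introduced in this series (array size and gfp flags are illustrative only):

	/* Illustrative only: stash a trace in the depot and print it back */
	unsigned long entries[16], *saved;
	unsigned int nr_entries;
	depot_stack_handle_t handle;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	handle = stack_depot_save(entries, nr_entries, GFP_KERNEL);
	if (handle) {
		nr_entries = stack_depot_fetch(handle, &saved);
		stack_trace_print(saved, nr_entries, 0);
	}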
@@ -49,37 +49,28 @@ static inline int in_irqentry_text(unsigned long ptr)
 		 ptr < (unsigned long)&__softirqentry_text_end);
 }
 
-static inline void filter_irq_stacks(struct stack_trace *trace)
+static inline unsigned int filter_irq_stacks(unsigned long *entries,
+					     unsigned int nr_entries)
 {
-	int i;
+	unsigned int i;
 
-	if (!trace->nr_entries)
-		return;
-	for (i = 0; i < trace->nr_entries; i++)
-		if (in_irqentry_text(trace->entries[i])) {
+	for (i = 0; i < nr_entries; i++) {
+		if (in_irqentry_text(entries[i])) {
 			/* Include the irqentry function into the stack. */
-			trace->nr_entries = i + 1;
-			break;
+			return i + 1;
 		}
+	}
+	return nr_entries;
 }
 
 static inline depot_stack_handle_t save_stack(gfp_t flags)
 {
 	unsigned long entries[KASAN_STACK_DEPTH];
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.entries = entries,
-		.max_entries = KASAN_STACK_DEPTH,
-		.skip = 0
-	};
+	unsigned int nr_entries;
 
-	save_stack_trace(&trace);
-	filter_irq_stacks(&trace);
-	if (trace.nr_entries != 0 &&
-	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
-		trace.nr_entries--;
-
-	return depot_save_stack(&trace, flags);
+	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+	nr_entries = filter_irq_stacks(entries, nr_entries);
+	return stack_depot_save(entries, nr_entries, flags);
 }
 
 static inline void set_track(struct kasan_track *track, gfp_t flags)
@@ -100,10 +100,11 @@ static void print_track(struct kasan_track *track, const char *prefix)
 {
 	pr_err("%s by task %u:\n", prefix, track->pid);
 	if (track->stack) {
-		struct stack_trace trace;
+		unsigned long *entries;
+		unsigned int nr_entries;
 
-		depot_fetch_stack(track->stack, &trace);
-		print_stack_trace(&trace, 0);
+		nr_entries = stack_depot_fetch(track->stack, &entries);
+		stack_trace_print(entries, nr_entries, 0);
 	} else {
 		pr_err("(stack is not available)\n");
 	}
@@ -410,11 +410,6 @@ static void print_unreferenced(struct seq_file *seq,
  */
 static void dump_object_info(struct kmemleak_object *object)
 {
-	struct stack_trace trace;
-
-	trace.nr_entries = object->trace_len;
-	trace.entries = object->trace;
-
 	pr_notice("Object 0x%08lx (size %zu):\n",
 		  object->pointer, object->size);
 	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
@@ -424,7 +419,7 @@ static void dump_object_info(struct kmemleak_object *object)
 	pr_notice("  flags = 0x%x\n", object->flags);
 	pr_notice("  checksum = %u\n", object->checksum);
 	pr_notice("  backtrace:\n");
-	print_stack_trace(&trace, 4);
+	stack_trace_print(object->trace, object->trace_len, 4);
 }
 
 /*
@@ -553,15 +548,7 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
  */
 static int __save_stack_trace(unsigned long *trace)
 {
-	struct stack_trace stack_trace;
-
-	stack_trace.max_entries = MAX_TRACE;
-	stack_trace.nr_entries = 0;
-	stack_trace.entries = trace;
-	stack_trace.skip = 2;
-	save_stack_trace(&stack_trace);
-
-	return stack_trace.nr_entries;
+	return stack_trace_save(trace, MAX_TRACE, 2);
 }
 
 /*
@@ -2021,13 +2008,8 @@ early_param("kmemleak", kmemleak_boot_config);
 
 static void __init print_log_trace(struct early_log *log)
 {
-	struct stack_trace trace;
-
-	trace.nr_entries = log->trace_len;
-	trace.entries = log->trace;
-
 	pr_notice("Early log backtrace:\n");
-	print_stack_trace(&trace, 2);
+	stack_trace_print(log->trace, log->trace_len, 2);
 }
 
 /*
@@ -58,15 +58,10 @@ static bool need_page_owner(void)
 static __always_inline depot_stack_handle_t create_dummy_stack(void)
 {
 	unsigned long entries[4];
-	struct stack_trace dummy;
+	unsigned int nr_entries;
 
-	dummy.nr_entries = 0;
-	dummy.max_entries = ARRAY_SIZE(entries);
-	dummy.entries = &entries[0];
-	dummy.skip = 0;
-
-	save_stack_trace(&dummy);
-	return depot_save_stack(&dummy, GFP_KERNEL);
+	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
+	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
 }
 
 static noinline void register_dummy_stack(void)
@@ -120,49 +115,39 @@ void __reset_page_owner(struct page *page, unsigned int order)
 	}
 }
 
-static inline bool check_recursive_alloc(struct stack_trace *trace,
-					unsigned long ip)
+static inline bool check_recursive_alloc(unsigned long *entries,
+					 unsigned int nr_entries,
+					 unsigned long ip)
 {
-	int i;
+	unsigned int i;
 
-	if (!trace->nr_entries)
-		return false;
-
-	for (i = 0; i < trace->nr_entries; i++) {
-		if (trace->entries[i] == ip)
+	for (i = 0; i < nr_entries; i++) {
+		if (entries[i] == ip)
 			return true;
 	}
 
 	return false;
 }
 
 static noinline depot_stack_handle_t save_stack(gfp_t flags)
 {
 	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.entries = entries,
-		.max_entries = PAGE_OWNER_STACK_DEPTH,
-		.skip = 2
-	};
 	depot_stack_handle_t handle;
+	unsigned int nr_entries;
 
-	save_stack_trace(&trace);
-	if (trace.nr_entries != 0 &&
-	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
-		trace.nr_entries--;
+	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
 
 	/*
-	 * We need to check recursion here because our request to stackdepot
-	 * could trigger memory allocation to save new entry. New memory
-	 * allocation would reach here and call depot_save_stack() again
-	 * if we don't catch it. There is still not enough memory in stackdepot
-	 * so it would try to allocate memory again and loop forever.
+	 * We need to check recursion here because our request to
+	 * stackdepot could trigger memory allocation to save new
+	 * entry. New memory allocation would reach here and call
+	 * stack_depot_save_entries() again if we don't catch it. There is
+	 * still not enough memory in stackdepot so it would try to
+	 * allocate memory again and loop forever.
 	 */
-	if (check_recursive_alloc(&trace, _RET_IP_))
+	if (check_recursive_alloc(entries, nr_entries, _RET_IP_))
 		return dummy_handle;
 
-	handle = depot_save_stack(&trace, flags);
+	handle = stack_depot_save(entries, nr_entries, flags);
 	if (!handle)
 		handle = failure_handle;
 
@@ -340,16 +325,10 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
 		struct page *page, struct page_owner *page_owner,
 		depot_stack_handle_t handle)
 {
-	int ret;
-	int pageblock_mt, page_mt;
+	int ret, pageblock_mt, page_mt;
+	unsigned long *entries;
+	unsigned int nr_entries;
 	char *kbuf;
-	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.entries = entries,
-		.max_entries = PAGE_OWNER_STACK_DEPTH,
-		.skip = 0
-	};
 
 	count = min_t(size_t, count, PAGE_SIZE);
 	kbuf = kmalloc(count, GFP_KERNEL);
@@ -378,8 +357,8 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
 	if (ret >= count)
 		goto err;
 
-	depot_fetch_stack(handle, &trace);
-	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
+	nr_entries = stack_depot_fetch(handle, &entries);
+	ret += stack_trace_snprint(kbuf + ret, count - ret, entries, nr_entries, 0);
 	if (ret >= count)
 		goto err;
 
@@ -410,14 +389,9 @@ void __dump_page_owner(struct page *page)
 {
 	struct page_ext *page_ext = lookup_page_ext(page);
 	struct page_owner *page_owner;
-	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
-	struct stack_trace trace = {
-		.nr_entries = 0,
-		.entries = entries,
-		.max_entries = PAGE_OWNER_STACK_DEPTH,
-		.skip = 0
-	};
 	depot_stack_handle_t handle;
+	unsigned long *entries;
+	unsigned int nr_entries;
 	gfp_t gfp_mask;
 	int mt;
 
@@ -441,10 +415,10 @@ void __dump_page_owner(struct page *page)
 		return;
 	}
 
-	depot_fetch_stack(handle, &trace);
+	nr_entries = stack_depot_fetch(handle, &entries);
 	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
 		 page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
-	print_stack_trace(&trace, 0);
+	stack_trace_print(entries, nr_entries, 0);
 
 	if (page_owner->last_migrate_reason != -1)
 		pr_alert("page has been migrated, last migrate reason: %s\n",
mm/slub.c (+21 -21)
@@ -552,31 +552,22 @@ static void set_track(struct kmem_cache *s, void *object,
 
 	if (addr) {
 #ifdef CONFIG_STACKTRACE
-		struct stack_trace trace;
-		int i;
+		unsigned int nr_entries;
 
-		trace.nr_entries = 0;
-		trace.max_entries = TRACK_ADDRS_COUNT;
-		trace.entries = p->addrs;
-		trace.skip = 3;
 		metadata_access_enable();
-		save_stack_trace(&trace);
+		nr_entries = stack_trace_save(p->addrs, TRACK_ADDRS_COUNT, 3);
 		metadata_access_disable();
 
-		/* See rant in lockdep.c */
-		if (trace.nr_entries != 0 &&
-		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
-			trace.nr_entries--;
-
-		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
-			p->addrs[i] = 0;
+		if (nr_entries < TRACK_ADDRS_COUNT)
+			p->addrs[nr_entries] = 0;
 #endif
 		p->addr = addr;
 		p->cpu = smp_processor_id();
 		p->pid = current->pid;
 		p->when = jiffies;
-	} else
+	} else {
 		memset(p, 0, sizeof(struct track));
+	}
 }
 
 static void init_tracking(struct kmem_cache *s, void *object)