/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_STACKLEAK_H
#define _LINUX_STACKLEAK_H

#include <linux/sched.h>
#include <linux/sched/task_stack.h>

/*
 * Check that the poison value points to the unused hole in the
 * virtual memory map for your platform.
 */
#define STACKLEAK_POISON -0xBEEF
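/* Bytes of consecutive poison that end the poison search in stackleak_erase(). */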
#define STACKLEAK_SEARCH_DEPTH 128

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
#include <asm/stacktrace.h>

/*
 * The lowest address on tsk's stack which we can plausibly erase.
 */
static __always_inline unsigned long
stackleak_task_low_bound(const struct task_struct *tsk)
{
	/*
	 * The lowest unsigned long on the task stack contains STACK_END_MAGIC,
	 * which we must not corrupt.
	 */
	return (unsigned long)end_of_stack(tsk) + sizeof(unsigned long);
}
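
/*
 * Initialise the stack tracking state for a newly allocated task stack:
 * 'lowest_stack' starts at the lowest address that can plausibly be erased.
 */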
static inline void stackleak_task_init(struct task_struct *t)
{
	t->lowest_stack = stackleak_task_low_bound(t);
# ifdef CONFIG_STACKLEAK_METRICS
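	/* Seed value for the stack usage metric exposed via procfs. */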
	t->prev_lowest_stack = t->lowest_stack;
# endif
}

#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */
static inline void stackleak_task_init(struct task_struct *t) { }
#endif

#endif