mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
8cb37a5974
The randomize_kstack_offset feature is unconditionally compiled in when the architecture supports it. To add constraints on compiler versions, we require a dedicated Kconfig variable. Therefore, introduce RANDOMIZE_KSTACK_OFFSET.

Furthermore, this option is now also configurable by EXPERT kernels: while the feature is supposed to have zero performance overhead when disabled (due to its use of static branches), there are a few cases where giving a distribution the option to disable the feature entirely makes sense. For example, very resource-constrained environments would never enable the feature to begin with, in which case the additional kernel code size increase is redundant.

Signed-off-by: Marco Elver <elver@google.com>
Reviewed-by: Nathan Chancellor <nathan@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220131090521.1947110-1-elver@google.com
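For context, a minimal sketch of how an architecture's syscall path consumes this feature. The helpers arch_do_syscall(), dispatch_syscall() and arch_cycle_counter() are hypothetical stand-ins, loosely modeled on the x86 entry code; only add_random_kstack_offset() and choose_random_kstack_offset() come from the header shown further below.

/* Hypothetical arch glue, for illustration only. */
#include <linux/randomize_kstack.h>
#include <linux/ptrace.h>
#include <linux/types.h>

static void dispatch_syscall(struct pt_regs *regs);	/* stand-in for the real dispatcher */
static u64 arch_cycle_counter(void);			/* stand-in for e.g. rdtsc() */

void arch_do_syscall(struct pt_regs *regs)
{
	/*
	 * Interrupts are still disabled and user registers have already
	 * been saved, as the header's comments require.
	 */
	add_random_kstack_offset();	/* shift this syscall's stack frame */

	dispatch_syscall(regs);

	/*
	 * XOR fresh entropy into the per-CPU offset for the *next*
	 * syscall, so a process cannot probe its own stack placement
	 * and reuse it; a cheap cycle counter is a typical source.
	 */
	choose_random_kstack_offset(arch_cycle_counter() & 0xFF);
}

Upstream x86, for instance, feeds rdtsc() into choose_random_kstack_offset() on exit to user mode. When CONFIG_RANDOMIZE_KSTACK_OFFSET is not set, both macros expand to empty statements (see the #else branch in the header), so such glue costs nothing on kernels built without the feature.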
60 lines
2.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_RANDOMIZE_KSTACK_H
#define _LINUX_RANDOMIZE_KSTACK_H

#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <linux/percpu-defs.h>

DECLARE_STATIC_KEY_MAYBE(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
			 randomize_kstack_offset);
DECLARE_PER_CPU(u32, kstack_offset);

/*
 * Do not use this anywhere else in the kernel. This is used here because
 * it provides an arch-agnostic way to grow the stack with correct
 * alignment. Also, since this use is being explicitly masked to a max of
 * 10 bits, stack-clash style attacks are unlikely. For more details see
 * "VLAs" in Documentation/process/deprecated.rst
 */
void *__builtin_alloca(size_t size);
/*
 * Use, at most, 10 bits of entropy. We explicitly cap this to keep the
 * "VLA" from being unbounded (see above). 10 bits leaves enough room for
 * per-arch offset masks to reduce entropy (by removing higher bits, since
 * high entropy may overly constrain usable stack space), and for
 * compiler/arch-specific stack alignment to remove the lower bits.
 */
#define KSTACK_OFFSET_MAX(x)	((x) & 0x3FF)

/*
 * These macros must be used during syscall entry when interrupts and
 * preempt are disabled, and after user registers have been stored to
 * the stack.
 */
#define add_random_kstack_offset() do {					\
	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
				&randomize_kstack_offset)) {		\
		u32 offset = raw_cpu_read(kstack_offset);		\
		u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset));	\
		/* Keep allocation even after "ptr" loses scope. */	\
		asm volatile("" :: "r"(ptr) : "memory");		\
	}								\
} while (0)

#define choose_random_kstack_offset(rand) do {				\
	if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,	\
				&randomize_kstack_offset)) {		\
		u32 offset = raw_cpu_read(kstack_offset);		\
		offset ^= (rand);					\
		raw_cpu_write(kstack_offset, offset);			\
	}								\
} while (0)
#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */
#define add_random_kstack_offset()		do { } while (0)
#define choose_random_kstack_offset(rand)	do { } while (0)
#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */

#endif
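To make the entropy arithmetic in the comments above concrete, here is a small userspace demo (plain C, not kernel code; the 16-byte alignment is an assumption standing in for compiler/arch stack alignment): the raw value is capped to 10 bits by KSTACK_OFFSET_MAX(), and alignment then strips the low bits, leaving 1024 / 16 = 64 distinct placements, i.e. about 6 bits of effective entropy.

/*
 * Illustrative userspace demo of the offset arithmetic, not kernel code.
 * Build with: cc demo.c -lm
 */
#include <stdio.h>
#include <math.h>
#include <stdint.h>
#include <stdbool.h>

#define KSTACK_OFFSET_MAX(x)	((x) & 0x3FF)	/* same 10-bit cap as the header */

int main(void)
{
	const uint32_t align = 16;	/* assumed compiler stack alignment */
	bool seen[0x400] = { false };
	uint32_t distinct = 0;

	for (uint32_t raw = 0; raw < 0x10000; raw++) {
		/* Cap to 10 bits, then drop the bits alignment would eat. */
		uint32_t off = KSTACK_OFFSET_MAX(raw) & ~(align - 1);

		if (!seen[off]) {
			seen[off] = true;
			distinct++;
		}
	}

	/* Prints: 64 distinct offsets (~6 bits) for 16-byte alignment. */
	printf("%u distinct offsets (~%.0f bits)\n", distinct, log2(distinct));
	return 0;
}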