Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
78eb4ea25c
const qualify the struct ctl_table argument in the proc_handler function
signatures. This is a prerequisite to moving the static ctl_table structs
into .rodata, which will ensure that proc_handler function pointers cannot
be modified.

This patch has been generated by the following coccinelle script:

```
virtual patch

@r1@
identifier ctl, write, buffer, lenp, ppos;
identifier func !~ "appldata_(timer|interval)_handler|sched_(rt|rr)_handler|rds_tcp_skbuf_handler|proc_sctp_do_(hmac_alg|rto_min|rto_max|udp_port|alpha_beta|auth|probe_interval)";
@@

int func(
- struct ctl_table *ctl
+ const struct ctl_table *ctl
  ,int write, void *buffer, size_t *lenp, loff_t *ppos);

@r2@
identifier func, ctl, write, buffer, lenp, ppos;
@@

int func(
- struct ctl_table *ctl
+ const struct ctl_table *ctl
  ,int write, void *buffer, size_t *lenp, loff_t *ppos)
{ ... }

@r3@
identifier func;
@@

int func(
- struct ctl_table *
+ const struct ctl_table *
  ,int , void *, size_t *, loff_t *);

@r4@
identifier func, ctl;
@@

int func(
- struct ctl_table *ctl
+ const struct ctl_table *ctl
  ,int , void *, size_t *, loff_t *);

@r5@
identifier func, write, buffer, lenp, ppos;
@@

int func(
- struct ctl_table *
+ const struct ctl_table *
  ,int write, void *buffer, size_t *lenp, loff_t *ppos);
```

* Code formatting was adjusted in xfs_sysctl.c to comply with code
  conventions. The xfs_stats_clear_proc_handler,
  xfs_panic_mask_proc_handler and xfs_deprecated_dointvec_minmax were
  adjusted.

* The ctl_table argument in proc_watchdog_common was const qualified. This
  is called from a proc_handler itself and is calling back into another
  proc_handler, making it necessary to change it as part of the
  proc_handler migration.

Co-developed-by: Thomas Weißschuh <linux@weissschuh.net>
Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
Co-developed-by: Joel Granados <j.granados@samsung.com>
Signed-off-by: Joel Granados <j.granados@samsung.com>
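To make the change concrete, here is an illustrative before/after of the signature update as it applies to the handler defined in this file (a sketch for readers, not part of the commit message):

```c
/* Before: handlers received a mutable table pointer. */
static int stack_erasing_sysctl(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos);

/*
 * After: the table is const. A handler that needs to adjust fields makes a
 * local copy, as stack_erasing_sysctl() below does with 'table_copy'.
 */
static int stack_erasing_sysctl(const struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos);
```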
177 lines · 4.7 KiB · C
// SPDX-License-Identifier: GPL-2.0
/*
 * This code fills the used part of the kernel stack with a poison value
 * before returning to userspace. It's part of the STACKLEAK feature
 * ported from grsecurity/PaX.
 *
 * Author: Alexander Popov <alex.popov@linux.com>
 *
 * STACKLEAK reduces the information which kernel stack leak bugs can
 * reveal and blocks some uninitialized stack variable attacks.
 */

#include <linux/stackleak.h>
#include <linux/kprobes.h>

#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
#include <linux/jump_label.h>
#include <linux/sysctl.h>
#include <linux/init.h>

static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);

#ifdef CONFIG_SYSCTL
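/*
 * proc handler for /proc/sys/kernel/stack_erasing: reports whether erasing
 * is enabled and, on write, toggles the 'stack_erasing_bypass' static
 * branch. Since the ctl_table argument is const, the integer is parsed via
 * a local copy of the table whose .data points at a stack variable.
 */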
static int stack_erasing_sysctl(const struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = 0;
	int state = !static_branch_unlikely(&stack_erasing_bypass);
	int prev_state = state;
	struct ctl_table table_copy = *table;

	table_copy.data = &state;
	ret = proc_dointvec_minmax(&table_copy, write, buffer, lenp, ppos);
	state = !!state;
	if (ret || !write || state == prev_state)
		return ret;

	if (state)
		static_branch_disable(&stack_erasing_bypass);
	else
		static_branch_enable(&stack_erasing_bypass);

	pr_warn("stackleak: kernel stack erasing is %s\n",
		state ? "enabled" : "disabled");
	return ret;
}
static struct ctl_table stackleak_sysctls[] = {
	{
		.procname	= "stack_erasing",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= stack_erasing_sysctl,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init stackleak_sysctls_init(void)
{
	register_sysctl_init("kernel", stackleak_sysctls);
	return 0;
}
late_initcall(stackleak_sysctls_init);
#endif /* CONFIG_SYSCTL */

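/*
 * Fast-path check used by the erase entry points below: with runtime
 * disable support, skip_erasing() reads the bypass static branch (a
 * runtime-patched jump); without it, erasing is unconditional.
 */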
#define skip_erasing()	static_branch_unlikely(&stack_erasing_bypass)
#else
#define skip_erasing()	false
#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */

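/*
 * Generic poison loop: fill the range [erase_low, erase_high) one word at
 * a time. The #ifndef guard lets an architecture supply its own, faster
 * __stackleak_poison implementation instead.
 */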
#ifndef __stackleak_poison
static __always_inline void __stackleak_poison(unsigned long erase_low,
					       unsigned long erase_high,
					       unsigned long poison)
{
	while (erase_low < erase_high) {
		*(unsigned long *)erase_low = poison;
		erase_low += sizeof(unsigned long);
	}
}
#endif

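/*
 * Common erase path: find where the previous poison ends, then write
 * poison from there up to either the current stack pointer (when running
 * on the task stack) or the top of the task stack (when running on
 * another stack).
 */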
static __always_inline void __stackleak_erase(bool on_task_stack)
{
	const unsigned long task_stack_low = stackleak_task_low_bound(current);
	const unsigned long task_stack_high = stackleak_task_high_bound(current);
	unsigned long erase_low, erase_high;

	erase_low = stackleak_find_top_of_poison(task_stack_low,
						 current->lowest_stack);

#ifdef CONFIG_STACKLEAK_METRICS
	current->prev_lowest_stack = erase_low;
#endif

	/*
	 * Write poison to the task's stack between 'erase_low' and
	 * 'erase_high'.
	 *
	 * If we're running on a different stack (e.g. an entry trampoline
	 * stack) we can erase everything below the pt_regs at the top of the
	 * task stack.
	 *
	 * If we're running on the task stack itself, we must not clobber any
	 * stack used by this function and its caller. We assume that this
	 * function has a fixed-size stack frame, and the current stack pointer
	 * doesn't change while we write poison.
	 */
	if (on_task_stack)
		erase_high = current_stack_pointer;
	else
		erase_high = task_stack_high;

	__stackleak_poison(erase_low, erase_high, STACKLEAK_POISON);

	/* Reset the 'lowest_stack' value for the next syscall */
	current->lowest_stack = task_stack_high;
}

/*
 * Erase and poison the portion of the task stack used since the last erase.
 * Can be called from the task stack or an entry stack when the task stack is
 * no longer in use.
 */
asmlinkage void noinstr stackleak_erase(void)
{
	if (skip_erasing())
		return;

	__stackleak_erase(on_thread_stack());
}

/*
 * Erase and poison the portion of the task stack used since the last erase.
 * Can only be called from the task stack.
 */
asmlinkage void noinstr stackleak_erase_on_task_stack(void)
{
	if (skip_erasing())
		return;

	__stackleak_erase(true);
}

/*
 * Erase and poison the portion of the task stack used since the last erase.
 * Can only be called from a stack other than the task stack.
 */
asmlinkage void noinstr stackleak_erase_off_task_stack(void)
{
	if (skip_erasing())
		return;

	__stackleak_erase(false);
}

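/*
 * Called from function prologues instrumented by the STACKLEAK gcc plugin
 * (functions with frames of at least CONFIG_STACKLEAK_TRACK_MIN_SIZE
 * bytes) to track the lowest stack pointer reached, bounding the region
 * that stackleak_erase() must later poison.
 */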
void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
{
	unsigned long sp = current_stack_pointer;

	/*
	 * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than
	 * STACKLEAK_SEARCH_DEPTH makes the poison search in
	 * stackleak_erase() unreliable. Let's prevent that.
	 */
	BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH);

	/* 'lowest_stack' should be aligned on the register width boundary */
	sp = ALIGN(sp, sizeof(unsigned long));
	if (sp < current->lowest_stack &&
	    sp >= stackleak_task_low_bound(current)) {
		current->lowest_stack = sp;
	}
}
EXPORT_SYMBOL(stackleak_track_stack);