sh: initial stack protector support.
This implements basic -fstack-protector support, based on the early ARM
version in c743f38013. The SMP case is limited to the initial canary value,
while the UP case handles per-task granularity (limited to 32-bit sh until
a new enough sh64 compiler manifests itself).
Signed-off-by: Filippo Arcidiacono <filippo.arcidiacono@st.com>
Reviewed-by: Carmelo Amoroso <carmelo.amoroso@st.com>
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
commit 5d920bb929 (parent 932e9f352b)
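
For context, the sketch below is a hand-written C approximation of what GCC's
-fstack-protector instrumentation does around each protected function. It is
illustrative only: the real checks are emitted by the compiler, and the names
demo_guard, demo_stack_chk_fail and process_request are made up here; they
stand in for the kernel's __stack_chk_guard and __stack_chk_fail used in the
diff below.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the kernel's __stack_chk_guard global canary. */
static unsigned long demo_guard = 0xdeadbeefUL;

/* Stand-in for __stack_chk_fail(); the kernel's version panics instead. */
static void demo_stack_chk_fail(void)
{
	fprintf(stderr, "stack-protector: stack corrupted\n");
	abort();
}

/* Roughly what a protected function looks like after instrumentation. */
static void process_request(const char *src)
{
	unsigned long canary = demo_guard;	/* prologue: copy guard into the frame */
	char buf[16];

	strncpy(buf, src, sizeof(buf) - 1);	/* normal function body */
	buf[sizeof(buf) - 1] = '\0';

	if (canary != demo_guard)		/* epilogue: verify before returning */
		demo_stack_chk_fail();
}

int main(void)
{
	process_request("hello");
	return 0;
}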
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -685,6 +685,20 @@ config SECCOMP
 
 	  If unsure, say N.
 
+config CC_STACKPROTECTOR
+	bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
+	depends on SUPERH32 && EXPERIMENTAL
+	help
+	  This option turns on the -fstack-protector GCC feature. This
+	  feature puts, at the beginning of functions, a canary value on
+	  the stack just before the return address, and validates
+	  the value just before actually returning. Stack based buffer
+	  overflows (that need to overwrite this return address) now also
+	  overwrite the canary, which gets detected and the attack is then
+	  neutralized via a kernel panic.
+
+	  This feature requires gcc version 4.2 or above.
+
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on SYS_SUPPORTS_SMP
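
The "neutralized via a kernel panic" wording in the help text refers to
__stack_chk_fail(), the routine the compiler calls when the canary check
fails. That handler is not part of this diff; it lives in generic code
(kernel/panic.c in kernels of this era) and looks approximately like the
sketch below, quoted from memory:

#include <linux/kernel.h>
#include <linux/export.h>

#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Called by compiler-generated code when it detects that the on-stack
 * canary value has been corrupted.
 */
void __stack_chk_fail(void)
{
	panic("stack-protector: Kernel stack is corrupted in: %p\n",
	      __builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);
#endif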
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -199,6 +199,10 @@ ifeq ($(CONFIG_DWARF_UNWINDER),y)
 	KBUILD_CFLAGS += -fasynchronous-unwind-tables
 endif
 
+ifeq ($(CONFIG_CC_STACKPROTECTOR),y)
+	KBUILD_CFLAGS += -fstack-protector
+endif
+
 libs-$(CONFIG_SUPERH32)	:= arch/sh/lib/	$(libs-y)
 libs-$(CONFIG_SUPERH64)	:= arch/sh/lib64/ $(libs-y)
 
arch/sh/include/asm/stackprotector.h (new file, 27 lines)
--- /dev/null
+++ b/arch/sh/include/asm/stackprotector.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_SH_STACKPROTECTOR_H
+#define __ASM_SH_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+	unsigned long canary;
+
+	/* Try to get a semi random initial value. */
+	get_random_bytes(&canary, sizeof(canary));
+	canary ^= LINUX_VERSION_CODE;
+
+	current->stack_canary = canary;
+	__stack_chk_guard = current->stack_canary;
+}
+
+#endif /* __ASM_SH_STACKPROTECTOR_H */
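
The NOTE about never-returning callers exists because the guard value written
here must outlive the frame that calls boot_init_stack_canary(). Architectures
do not include this header directly; the generic <linux/stackprotector.h>
wrapper pulls it in when CONFIG_CC_STACKPROTECTOR is set, and the canonical
caller is start_kernel(), which never returns. A rough sketch of that call
site (generic init/main.c, not part of this commit):

/* Sketch of the generic boot path (init/main.c), shown for context only. */
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/stackprotector.h>

asmlinkage void __init start_kernel(void)
{
	/*
	 * Set up the initial canary as early as possible; start_kernel()
	 * never returns, so the guard value installed from this frame
	 * stays valid for the lifetime of the system.
	 */
	boot_init_stack_canary();

	/* ... the rest of early boot initialisation ... */
}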
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -2,10 +2,17 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/export.h>
+#include <linux/stackprotector.h>
 
 struct kmem_cache *task_xstate_cachep = NULL;
 unsigned int xstate_size;
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	*dst = *src;
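
__stack_chk_guard is exported because modules are built with the same
-fstack-protector flag, so their compiler-generated prologues and epilogues
reference the symbol directly. A hypothetical module function illustrating
this (all names here are invented for the example):

/* Hypothetical module code; names are made up for illustration. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>

static int frob_buffer(const char *label)
{
	char tmp[32];

	/*
	 * Built with CONFIG_CC_STACKPROTECTOR=y, this function's prologue
	 * copies __stack_chk_guard into the frame and its epilogue checks
	 * it, which is why the symbol must be exported to modules.
	 */
	snprintf(tmp, sizeof(tmp), "%s", label);
	return strlen(tmp);
}

static int __init frob_init(void)
{
	return frob_buffer("demo") > 0 ? 0 : -EINVAL;
}
module_init(frob_init);

MODULE_LICENSE("GPL");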
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -22,6 +22,7 @@
 #include <linux/ftrace.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/prefetch.h>
+#include <linux/stackprotector.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/fpu.h>
@@ -220,6 +221,10 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 {
 	struct thread_struct *next_t = &next->thread;
 
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	__stack_chk_guard = next->stack_canary;
+#endif
+
 	unlazy_fpu(prev, task_pt_regs(prev));
 
 	/* we're going to use this soon, after a few expensive things */
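
This is the per-task half mentioned in the commit message: on UP, __switch_to()
copies the incoming task's canary into the single global guard, so every task
effectively runs with its own value. The per-task value itself is seeded by the
generic fork path, approximately as in the excerpt below (kernel/fork.c of this
era, quoted from memory and not part of this diff). On SMP that trick does not
work, since one shared variable cannot track the current task of every CPU at
once, so those configurations keep only the boot-time canary.

/* Approximate excerpt from dup_task_struct() in kernel/fork.c: */
#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif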