ARM: 9384/2: mm: Make tlbflush routines CFI safe
Instead of avoiding CFI entirely on the TLB flush helpers, reorganize the
code so that the CFI machinery can deal with it. The important things to
take into account are:

- functions in asm called indirectly from C need to be defined using
  SYM_TYPED_FUNC_START()
- a reference to the asm function needs to be visible to the compiler,
  in order to get it to emit the typeid symbol.

The latter means that defining the cpu_tlb_fns structs is best done from
C code, so that the references in the static initializers will be
visible to the compiler.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
This commit is contained in:
parent 4cece76496
commit 6b0ef2792c
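
A minimal sketch of the pattern the commit message describes (illustrative only, the names mini_tlb_fns, asm_flush_range and flush_one_page are made up): the C side declares the asm routine with a proper prototype and takes its address in a static initializer, which is what makes the compiler emit the KCFI type-id symbol; on the asm side, SYM_TYPED_FUNC_START() attaches the matching type hash to the function entry so the indirect call passes the check.

/* Sketch only, not kernel code. */
struct mini_tlb_fns {
	void (*flush_kern_range)(unsigned long start, unsigned long end);
};

/* Routine implemented in assembly, bracketed there by
 * SYM_TYPED_FUNC_START(asm_flush_range)/SYM_FUNC_END(asm_flush_range). */
void asm_flush_range(unsigned long start, unsigned long end);

/* Taking the address in a static initializer makes the compiler emit the
 * type-id symbol that the asm-side annotation refers to. */
static const struct mini_tlb_fns fns = {
	.flush_kern_range = asm_flush_range,
};

void flush_one_page(unsigned long addr)
{
	/* Indirect call: with CONFIG_CFI_CLANG the caller verifies the
	 * callee's type hash before branching. */
	fns.flush_kern_range(addr, addr + 4096);
}
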
arch/arm/mm/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_CPU_TLB_FEROCEON)	+= tlb-v4wbi.o	# reuse v4wbi TLB functions
 obj-$(CONFIG_CPU_TLB_V6)	+= tlb-v6.o
 obj-$(CONFIG_CPU_TLB_V7)	+= tlb-v7.o
 obj-$(CONFIG_CPU_TLB_FA)	+= tlb-fa.o
+obj-y				+= tlb.o
 
 obj-$(CONFIG_CPU_ARM7TDMI)	+= proc-arm7tdmi.o
 obj-$(CONFIG_CPU_ARM720T)	+= proc-arm720.o
arch/arm/mm/proc-macros.S
@@ -338,21 +338,6 @@ ENTRY(\name\()_cache_fns)
 	.size	\name\()_cache_fns, . - \name\()_cache_fns
 .endm
 
-.macro define_tlb_functions name:req, flags_up:req, flags_smp
-	.type	\name\()_tlb_fns, #object
-	.align 2
-ENTRY(\name\()_tlb_fns)
-	.long	\name\()_flush_user_tlb_range
-	.long	\name\()_flush_kern_tlb_range
-	.ifnb \flags_smp
-		ALT_SMP(.long	\flags_smp )
-		ALT_UP(.long	\flags_up )
-	.else
-		.long	\flags_up
-	.endif
-	.size	\name\()_tlb_fns, . - \name\()_tlb_fns
-.endm
-
 .macro globl_equ x, y
 	.globl	\x
 	.equ	\x, \y
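
For context, the asm macro removed above laid out, per CPU type, an object with exactly the shape of struct cpu_tlb_fns: two code pointers followed by a flags word. The C definition (roughly as found in arch/arm/include/asm/tlbflush.h; quoted from memory, so treat the exact layout as an assumption) is:

struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long,
				 struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};

With 32-bit pointers this puts tlb_flags at offset 8, which is what the static_assert() in the new tlb.c further down checks before the SMP_ON_UP run-time patching pokes at that slot.
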
arch/arm/mm/tlb-fa.S
@@ -15,6 +15,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/tlbflush.h>
@@ -31,7 +32,7 @@
  *	- mm    - mm_struct describing address space
  */
 	.align	4
-ENTRY(fa_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(fa_flush_user_tlb_range)
 	vma_vm_mm ip, r2
 	act_mm	r3				@ get current->active_mm
 	eors	r3, ip, r3			@ == mm ?
@@ -46,9 +47,10 @@ ENTRY(fa_flush_user_tlb_range)
 	blo	1b
 	mcr	p15, 0, r3, c7, c10, 4		@ data write barrier
 	ret	lr
+SYM_FUNC_END(fa_flush_user_tlb_range)
 
 
-ENTRY(fa_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(fa_flush_kern_tlb_range)
 	mov	r3, #0
 	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
 	bic	r0, r0, #0x0ff
@@ -60,8 +62,4 @@ ENTRY(fa_flush_kern_tlb_range)
 	mcr	p15, 0, r3, c7, c10, 4		@ data write barrier
 	mcr	p15, 0, r3, c7, c5, 4		@ prefetch flush (isb)
 	ret	lr
-
-	__INITDATA
-
-	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
-	define_tlb_functions fa, fa_tlb_flags
+SYM_FUNC_END(fa_flush_kern_tlb_range)
arch/arm/mm/tlb-v4.S
@@ -11,6 +11,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/tlbflush.h>
@@ -27,7 +28,7 @@
  *	- mm    - mm_struct describing address space
  */
 	.align	5
-ENTRY(v4_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v4_flush_user_tlb_range)
 	vma_vm_mm ip, r2
 	act_mm	r3				@ get current->active_mm
 	eors	r3, ip, r3			@ == mm ?
@@ -40,6 +41,7 @@ ENTRY(v4_flush_user_tlb_range)
 	cmp	r0, r1
 	blo	1b
 	ret	lr
+SYM_FUNC_END(v4_flush_user_tlb_range)
 
 /*
  *	v4_flush_kern_tlb_range(start, end)
@@ -50,10 +52,11 @@ ENTRY(v4_flush_user_tlb_range)
  *	- start - virtual address (may not be aligned)
  *	- end   - virtual address (may not be aligned)
  */
+#ifdef CONFIG_CFI_CLANG
+SYM_TYPED_FUNC_START(v4_flush_kern_tlb_range)
+	b	.v4_flush_kern_tlb_range
+SYM_FUNC_END(v4_flush_kern_tlb_range)
+#else
 .globl v4_flush_kern_tlb_range
 .equ v4_flush_kern_tlb_range, .v4_flush_kern_tlb_range
-
-	__INITDATA
-
-	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
-	define_tlb_functions v4, v4_tlb_flags
+#endif
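
The v4 case is the odd one out: v4_flush_kern_tlb_range was previously only a .globl/.equ alias for the internal label .v4_flush_kern_tlb_range. An alias to an untyped label carries no KCFI type hash at its entry, so with CONFIG_CFI_CLANG the diff adds a small typed trampoline that simply branches to that label, and keeps the plain alias for non-CFI builds. A C analogue of that shape (purely illustrative, with hypothetical names) would be:

/* Hypothetical C analogue of the typed trampoline added for v4 above. */
static void internal_flush_kern_range(unsigned long start, unsigned long end)
{
	/* ... the real work, not reachable indirectly under CFI ... */
}

/* Typed, externally visible entry point: the function pointer in the ops
 * struct refers to this, so the CFI check finds a matching type hash here
 * before control reaches the internal helper. */
void flush_kern_range_trampoline(unsigned long start, unsigned long end)
{
	internal_flush_kern_range(start, end);
}
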
arch/arm/mm/tlb-v4wb.S
@@ -11,6 +11,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/tlbflush.h>
@@ -27,7 +28,7 @@
  *	- mm    - mm_struct describing address space
  */
 	.align	5
-ENTRY(v4wb_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v4wb_flush_user_tlb_range)
 	vma_vm_mm ip, r2
 	act_mm	r3				@ get current->active_mm
 	eors	r3, ip, r3			@ == mm ?
@@ -43,6 +44,7 @@ ENTRY(v4wb_flush_user_tlb_range)
 	cmp	r0, r1
 	blo	1b
 	ret	lr
+SYM_FUNC_END(v4wb_flush_user_tlb_range)
 
 /*
  *	v4_flush_kern_tlb_range(start, end)
@@ -53,7 +55,7 @@ ENTRY(v4wb_flush_user_tlb_range)
  *	- start - virtual address (may not be aligned)
  *	- end   - virtual address (may not be aligned)
  */
-ENTRY(v4wb_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v4wb_flush_kern_tlb_range)
 	mov	r3, #0
 	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
 	bic	r0, r0, #0x0ff
@@ -64,8 +66,4 @@ ENTRY(v4wb_flush_kern_tlb_range)
 	cmp	r0, r1
 	blo	1b
 	ret	lr
-
-	__INITDATA
-
-	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
-	define_tlb_functions v4wb, v4wb_tlb_flags
+SYM_FUNC_END(v4wb_flush_kern_tlb_range)
arch/arm/mm/tlb-v4wbi.S
@@ -11,6 +11,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/tlbflush.h>
@@ -26,7 +27,7 @@
  *	- mm    - mm_struct describing address space
  */
 	.align	5
-ENTRY(v4wbi_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v4wbi_flush_user_tlb_range)
 	vma_vm_mm ip, r2
 	act_mm	r3				@ get current->active_mm
 	eors	r3, ip, r3			@ == mm ?
@@ -43,8 +44,9 @@ ENTRY(v4wbi_flush_user_tlb_range)
 	cmp	r0, r1
 	blo	1b
 	ret	lr
+SYM_FUNC_END(v4wbi_flush_user_tlb_range)
 
-ENTRY(v4wbi_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v4wbi_flush_kern_tlb_range)
 	mov	r3, #0
 	mcr	p15, 0, r3, c7, c10, 4		@ drain WB
 	bic	r0, r0, #0x0ff
@@ -55,8 +57,4 @@ ENTRY(v4wbi_flush_kern_tlb_range)
 	cmp	r0, r1
 	blo	1b
 	ret	lr
-
-	__INITDATA
-
-	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
-	define_tlb_functions v4wbi, v4wbi_tlb_flags
+SYM_FUNC_END(v4wbi_flush_kern_tlb_range)
arch/arm/mm/tlb-v6.S
@@ -9,6 +9,7 @@
  */
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
 #include <asm/page.h>
@@ -32,7 +33,7 @@
  *	- the "Invalidate single entry" instruction will invalidate
  *	  both the I and the D TLBs on Harvard-style TLBs
  */
-ENTRY(v6wbi_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v6wbi_flush_user_tlb_range)
 	vma_vm_mm r3, r2			@ get vma->vm_mm
 	mov	ip, #0
 	mmid	r3, r3				@ get vm_mm->context.id
@@ -56,6 +57,7 @@ ENTRY(v6wbi_flush_user_tlb_range)
 	blo	1b
 	mcr	p15, 0, ip, c7, c10, 4		@ data synchronization barrier
 	ret	lr
+SYM_FUNC_END(v6wbi_flush_user_tlb_range)
 
 /*
  *	v6wbi_flush_kern_tlb_range(start,end)
@@ -65,7 +67,7 @@ ENTRY(v6wbi_flush_user_tlb_range)
  *	- start - start address (may not be aligned)
  *	- end   - end address (exclusive, may not be aligned)
  */
-ENTRY(v6wbi_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v6wbi_flush_kern_tlb_range)
 	mov	r2, #0
 	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
 	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
@@ -85,8 +87,4 @@ ENTRY(v6wbi_flush_kern_tlb_range)
 	mcr	p15, 0, r2, c7, c10, 4		@ data synchronization barrier
 	mcr	p15, 0, r2, c7, c5, 4		@ prefetch flush (isb)
 	ret	lr
-
-	__INIT
-
-	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
-	define_tlb_functions v6wbi, v6wbi_tlb_flags
+SYM_FUNC_END(v6wbi_flush_kern_tlb_range)
arch/arm/mm/tlb-v7.S
@@ -10,6 +10,7 @@
  */
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
@@ -31,7 +32,7 @@
  *	- the "Invalidate single entry" instruction will invalidate
  *	  both the I and the D TLBs on Harvard-style TLBs
  */
-ENTRY(v7wbi_flush_user_tlb_range)
+SYM_TYPED_FUNC_START(v7wbi_flush_user_tlb_range)
 	vma_vm_mm r3, r2			@ get vma->vm_mm
 	mmid	r3, r3				@ get vm_mm->context.id
 	dsb	ish
@@ -57,7 +58,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	blo	1b
 	dsb	ish
 	ret	lr
-ENDPROC(v7wbi_flush_user_tlb_range)
+SYM_FUNC_END(v7wbi_flush_user_tlb_range)
 
 /*
  *	v7wbi_flush_kern_tlb_range(start,end)
@@ -67,7 +68,7 @@ ENDPROC(v7wbi_flush_user_tlb_range)
  *	- start - start address (may not be aligned)
  *	- end   - end address (exclusive, may not be aligned)
  */
-ENTRY(v7wbi_flush_kern_tlb_range)
+SYM_TYPED_FUNC_START(v7wbi_flush_kern_tlb_range)
 	dsb	ish
 	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
 	mov	r1, r1, lsr #PAGE_SHIFT
@@ -86,9 +87,4 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	dsb	ish
 	isb
 	ret	lr
-ENDPROC(v7wbi_flush_kern_tlb_range)
-
-	__INIT
-
-	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
-	define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp
+SYM_FUNC_END(v7wbi_flush_kern_tlb_range)
arch/arm/mm/tlb.c (new file, 84 lines)
@@ -0,0 +1,84 @@
// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2024 Google LLC
// Author: Ard Biesheuvel <ardb@google.com>

#include <linux/types.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_CPU_TLB_V4WT
void v4_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
void v4_flush_kern_tlb_range(unsigned long, unsigned long);

struct cpu_tlb_fns v4_tlb_fns __initconst = {
	.flush_user_range	= v4_flush_user_tlb_range,
	.flush_kern_range	= v4_flush_kern_tlb_range,
	.tlb_flags		= v4_tlb_flags,
};
#endif

#ifdef CONFIG_CPU_TLB_V4WB
void v4wb_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
void v4wb_flush_kern_tlb_range(unsigned long, unsigned long);

struct cpu_tlb_fns v4wb_tlb_fns __initconst = {
	.flush_user_range	= v4wb_flush_user_tlb_range,
	.flush_kern_range	= v4wb_flush_kern_tlb_range,
	.tlb_flags		= v4wb_tlb_flags,
};
#endif

#if defined(CONFIG_CPU_TLB_V4WBI) || defined(CONFIG_CPU_TLB_FEROCEON)
void v4wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
void v4wbi_flush_kern_tlb_range(unsigned long, unsigned long);

struct cpu_tlb_fns v4wbi_tlb_fns __initconst = {
	.flush_user_range	= v4wbi_flush_user_tlb_range,
	.flush_kern_range	= v4wbi_flush_kern_tlb_range,
	.tlb_flags		= v4wbi_tlb_flags,
};
#endif

#ifdef CONFIG_CPU_TLB_V6
void v6wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
void v6wbi_flush_kern_tlb_range(unsigned long, unsigned long);

struct cpu_tlb_fns v6wbi_tlb_fns __initconst = {
	.flush_user_range	= v6wbi_flush_user_tlb_range,
	.flush_kern_range	= v6wbi_flush_kern_tlb_range,
	.tlb_flags		= v6wbi_tlb_flags,
};
#endif

#ifdef CONFIG_CPU_TLB_V7
void v7wbi_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
void v7wbi_flush_kern_tlb_range(unsigned long, unsigned long);

struct cpu_tlb_fns v7wbi_tlb_fns __initconst = {
	.flush_user_range	= v7wbi_flush_user_tlb_range,
	.flush_kern_range	= v7wbi_flush_kern_tlb_range,
	.tlb_flags		= IS_ENABLED(CONFIG_SMP) ? v7wbi_tlb_flags_smp
						 : v7wbi_tlb_flags_up,
};

#ifdef CONFIG_SMP_ON_UP
/* This will be run-time patched so the offset better be right */
static_assert(offsetof(struct cpu_tlb_fns, tlb_flags) == 8);

asm("	.pushsection	\".alt.smp.init\", \"a\"	\n" \
    "	.align		2				\n" \
    "	.long		v7wbi_tlb_fns + 8 - .		\n" \
    "	.long		" __stringify(v7wbi_tlb_flags_up) "	\n" \
    "	.popsection					\n");
#endif
#endif

#ifdef CONFIG_CPU_TLB_FA
void fa_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
void fa_flush_kern_tlb_range(unsigned long, unsigned long);

struct cpu_tlb_fns fa_tlb_fns __initconst = {
	.flush_user_range	= fa_flush_user_tlb_range,
	.flush_kern_range	= fa_flush_kern_tlb_range,
	.tlb_flags		= fa_tlb_flags,
};
#endif
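
For readers wondering where these structs end up: on a multi-TLB kernel the entry for the detected CPU is copied into the global cpu_tlb ops struct at boot, and the generic flush helpers dispatch through its function pointers. Those indirect calls are exactly what KCFI instruments, which is why the callees now need the typed asm entry points above. A rough sketch of the call path (hand-written here, not verbatim from <asm/tlbflush.h>; sketch_local_flush_tlb_range is a made-up name):

/* Rough sketch of the dispatch path, assuming a MULTI_TLB configuration. */
extern struct cpu_tlb_fns cpu_tlb;	/* filled in from one of the *_tlb_fns at boot */

static inline void sketch_local_flush_tlb_range(struct vm_area_struct *vma,
						 unsigned long start,
						 unsigned long end)
{
	/* Indirect call through the ops struct: under CONFIG_CFI_CLANG the
	 * caller verifies the callee's KCFI type hash, which only matches
	 * because the asm routines are defined with SYM_TYPED_FUNC_START(). */
	cpu_tlb.flush_user_range(start, end, vma);
}
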