linux/arch/arm/mm/tlb-v6.S
Ard Biesheuvel 6b0ef2792c ARM: 9384/2: mm: Make tlbflush routines CFI safe
Instead of avoiding CFI entirely on the TLB flush helpers, reorganize
the code so that the CFI machinery can deal with it. The important
things to take into account are:
- functions in asm called indirectly from C need to be defined using
  SYM_TYPED_FUNC_START()
- a reference to the asm function needs to be visible to the compiler,
  in order to get it to emit the typeid symbol.

The latter means that defining the cpu_tlb_fns structs is best done from
C code, so that the references in the static initializers will be
visible to the compiler.
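
As an illustration, the C-side definitions (added by this patch in
arch/arm/mm/tlb.c) follow this pattern; a minimal sketch for the v6
case, with the prototypes spelled out and the exact annotations in the
real file possibly differing:

	#include <asm/tlbflush.h>

	/*
	 * asm implementations from tlb-v6.S, declared so that taking
	 * their address below makes the compiler emit the matching
	 * __kcfi_typeid_ symbols that the SYM_TYPED_FUNC_START()
	 * preambles refer to
	 */
	void v6wbi_flush_user_tlb_range(unsigned long start, unsigned long end,
					struct vm_area_struct *vma);
	void v6wbi_flush_kern_tlb_range(unsigned long start, unsigned long end);

	struct cpu_tlb_fns v6wbi_tlb_fns = {
		.flush_user_range	= v6wbi_flush_user_tlb_range,
		.flush_kern_range	= v6wbi_flush_kern_tlb_range,
		.tlb_flags		= v6wbi_tlb_flags,	/* from asm/tlbflush.h */
	};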

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
2024-04-29 14:14:15 +01:00


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/tlb-v6.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARM architecture version 6 TLB handling functions.
 *  These assume a split I/D TLB.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"
#define HARVARD_TLB
.arch armv6

/*
 *	v6wbi_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of TLB entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- vma   - vm_area_struct describing address range
 *
 *	It is assumed that:
 *	- the "Invalidate single entry" instruction will invalidate
 *	  both the I and the D TLBs on Harvard-style TLBs
 */
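/*
 * Note: with CONFIG_CFI_CLANG, SYM_TYPED_FUNC_START() places a
 * .4byte __kcfi_typeid_v6wbi_flush_user_tlb_range word ahead of the
 * entry point; the compiler emits that typeid symbol only when it
 * sees a prototyped, address-taken reference to this function from C
 * code, which is why the cpu_tlb_fns initializers live in C.
 */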
SYM_TYPED_FUNC_START(v6wbi_flush_user_tlb_range)
	vma_vm_mm r3, r2			@ get vma->vm_mm
	mov	ip, #0
	mmid	r3, r3				@ get vm_mm->context.id
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
	mov	r1, r1, lsr #PAGE_SHIFT
	asid	r3, r3				@ mask ASID
	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
	mov	r1, r1, lsl #PAGE_SHIFT
	vma_vm_flags r2, r2			@ get vma->vm_flags
1:
#ifdef HARVARD_TLB
	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA (was 1)
	tst	r2, #VM_EXEC			@ Executable area ?
	mcrne	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA (was 1)
#else
	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate MVA (was 1)
#endif
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ data synchronization barrier
	ret	lr
SYM_FUNC_END(v6wbi_flush_user_tlb_range)

/*
 *	v6wbi_flush_kern_tlb_range(start,end)
 *
 *	Invalidate a range of kernel TLB entries
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 */
SYM_TYPED_FUNC_START(v6wbi_flush_kern_tlb_range)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
	mov	r1, r1, lsr #PAGE_SHIFT
	mov	r0, r0, lsl #PAGE_SHIFT
	mov	r1, r1, lsl #PAGE_SHIFT
1:
#ifdef HARVARD_TLB
	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA
	mcr	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA
#else
	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate MVA
#endif
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r2, c7, c10, 4		@ data synchronization barrier
	mcr	p15, 0, r2, c7, c5, 4		@ prefetch flush (isb)
	ret	lr
SYM_FUNC_END(v6wbi_flush_kern_tlb_range)
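
For reference, on multi-TLB kernels these helpers are only ever reached
through an indirect call, which is the call site KCFI instruments. A
minimal sketch, assuming the cpu_tlb_fns layout from
arch/arm/include/asm/tlbflush.h (the wrapper name is hypothetical):

	struct vm_area_struct;

	/* per-CPU-type TLB operations, layout as in asm/tlbflush.h */
	struct cpu_tlb_fns {
		void (*flush_user_range)(unsigned long, unsigned long,
					 struct vm_area_struct *);
		void (*flush_kern_range)(unsigned long, unsigned long);
		unsigned long tlb_flags;
	};

	extern struct cpu_tlb_fns cpu_tlb;	/* filled in at boot */

	/* hypothetical wrapper showing the indirect call path */
	static inline void flush_kern_range_sketch(unsigned long start,
						   unsigned long end)
	{
		/*
		 * Indirect call: with CONFIG_CFI_CLANG the caller compares
		 * the typeid word emitted by SYM_TYPED_FUNC_START() above
		 * with the type of .flush_kern_range before branching.
		 */
		cpu_tlb.flush_kern_range(start, end);
	}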