Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2024-12-29 17:23:36 +00:00)

Commit c4238686f9
We found the OOB crash below:

[   33.452494] ==================================================================
[   33.453513] BUG: KASAN: stack-out-of-bounds in refresh_cpu_vm_stats.constprop.0+0xcc/0x2ec
[   33.454660] Write of size 164 at addr c1d03d30 by task swapper/0/0
[   33.455515]
[   33.455767] CPU: 0 PID: 0 Comm: swapper/0 Tainted: G O 6.1.25-mainline #1
[   33.456880] Hardware name: Generic DT based system
[   33.457555]  unwind_backtrace from show_stack+0x18/0x1c
[   33.458326]  show_stack from dump_stack_lvl+0x40/0x4c
[   33.459072]  dump_stack_lvl from print_report+0x158/0x4a4
[   33.459863]  print_report from kasan_report+0x9c/0x148
[   33.460616]  kasan_report from kasan_check_range+0x94/0x1a0
[   33.461424]  kasan_check_range from memset+0x20/0x3c
[   33.462157]  memset from refresh_cpu_vm_stats.constprop.0+0xcc/0x2ec
[   33.463064]  refresh_cpu_vm_stats.constprop.0 from tick_nohz_idle_stop_tick+0x180/0x53c
[   33.464181]  tick_nohz_idle_stop_tick from do_idle+0x264/0x354
[   33.465029]  do_idle from cpu_startup_entry+0x20/0x24
[   33.465769]  cpu_startup_entry from rest_init+0xf0/0xf4
[   33.466528]  rest_init from arch_post_acpi_subsys_init+0x0/0x18
[   33.467397]
[   33.467644] The buggy address belongs to stack of task swapper/0/0
[   33.468493]  and is located at offset 112 in frame:
[   33.469172]  refresh_cpu_vm_stats.constprop.0+0x0/0x2ec
[   33.469917]
[   33.470165] This frame has 2 objects:
[   33.470696]  [32, 76) 'global_zone_diff'
[   33.470729]  [112, 276) 'global_node_diff'
[   33.471294]
[   33.472095] The buggy address belongs to the physical page:
[   33.472862] page:3cd72da8 refcount:1 mapcount:0 mapping:00000000 index:0x0 pfn:0x41d03
[   33.473944] flags: 0x1000(reserved|zone=0)
[   33.474565] raw: 00001000 ed741470 ed741470 00000000 00000000 00000000 ffffffff 00000001
[   33.475656] raw: 00000000
[   33.476050] page dumped because: kasan: bad access detected
[   33.476816]
[   33.477061] Memory state around the buggy address:
[   33.477732]  c1d03c00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
[   33.478630]  c1d03c80: 00 00 00 00 00 00 00 00 f1 f1 f1 f1 00 00 00 00
[   33.479526] >c1d03d00: 00 04 f2 f2 f2 f2 00 00 00 00 00 00 f1 f1 f1 f1
[   33.480415]                   ^
[   33.481195]  c1d03d80: 00 00 00 00 00 00 00 00 00 00 04 f3 f3 f3 f3 f3
[   33.482088]  c1d03e00: f3 f3 f3 f3 00 00 00 00 00 00 00 00 00 00 00 00
[   33.482978] ==================================================================

The root cause of this OOB is that arm does not clear stale stack poison in the case of cpuidle. This patch refers to arch/arm64/kernel/sleep.S to resolve the issue.

From cited commit [1], which explains the problem:

Functions which the compiler has instrumented for KASAN place poison on the stack shadow upon entry and remove this poison prior to returning.

In the case of cpuidle, CPUs exit the kernel a number of levels deep in C code. Any instrumented functions on this critical path will leave portions of the stack shadow poisoned.

If CPUs lose context and return to the kernel via a cold path, we restore a prior context saved in __cpu_suspend_enter; the function calls made between that save and the actual exit of the kernel are forgotten, and the poison they placed in the stack shadow area is never removed.

Thus, (depending on stackframe layout) subsequent calls to instrumented functions may hit this stale poison, resulting in (spurious) KASAN splats to the console.

To avoid this, clear any stale poison from the idle thread for a CPU prior to bringing a CPU online.

From cited commit [2]: extend the check to cover CONFIG_KASAN_STACK.

[1] commit 0d97e6d802 ("arm64: kasan: clear stale stack poison")
[2] commit d56a9ef84b ("kasan, arm64: unpoison stack only with CONFIG_KASAN_STACK")

Signed-off-by: Boy Wu <boy.wu@mediatek.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Fixes: 5615f69bc2 ("ARM: 9016/2: Initialize the mapping of KASan shadow memory")
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
199 lines · 5.6 KiB · ArmAsm
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
	.text
/*
 * Implementation of MPIDR hash algorithm through shifting
 * and OR'ing.
 *
 * @dst: register containing hash result
 * @rs0: register containing affinity level 0 bit shift
 * @rs1: register containing affinity level 1 bit shift
 * @rs2: register containing affinity level 2 bit shift
 * @mpidr: register containing MPIDR value
 * @mask: register containing MPIDR mask
 *
 * Pseudo C-code:
 *
 *u32 dst;
 *
 *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 mpidr, u32 mask) {
 *	u32 aff0, aff1, aff2;
 *	u32 mpidr_masked = mpidr & mask;
 *	aff0 = mpidr_masked & 0xff;
 *	aff1 = mpidr_masked & 0xff00;
 *	aff2 = mpidr_masked & 0xff0000;
 *	dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2);
 *}
 * Input registers: rs0, rs1, rs2, mpidr, mask
 * Output register: dst
 * Note: input and output registers must be disjoint register sets
         (eg: a macro instance with mpidr = r1 and dst = r1 is invalid)
 */
	.macro compute_mpidr_hash dst, rs0, rs1, rs2, mpidr, mask
	and	\mpidr, \mpidr, \mask			@ mask out MPIDR bits
	and	\dst, \mpidr, #0xff			@ mask=aff0
 ARM(	mov	\dst, \dst, lsr \rs0	)		@ dst=aff0>>rs0
 THUMB(	lsr	\dst, \dst, \rs0	)
	and	\mask, \mpidr, #0xff00			@ mask = aff1
 ARM(	orr	\dst, \dst, \mask, lsr \rs1	)	@ dst|=(aff1>>rs1)
 THUMB(	lsr	\mask, \mask, \rs1	)
 THUMB(	orr	\dst, \dst, \mask	)
	and	\mask, \mpidr, #0xff0000		@ mask = aff2
 ARM(	orr	\dst, \dst, \mask, lsr \rs2	)	@ dst|=(aff2>>rs2)
 THUMB(	lsr	\mask, \mask, \rs2	)
 THUMB(	orr	\dst, \dst, \mask	)
	.endm
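/*
 * Worked example with hypothetical values: take mask = 0x103 (two aff0
 * bits and one aff1 bit in use) and shifts rs0 = 0, rs1 = 6.  For
 * MPIDR = 0x102 (cluster 1, CPU 2), aff0 = 0x02 and aff1 = 0x100, so
 * dst = (0x02 >> 0) | (0x100 >> 6) = 2 | 4 = 6: the in-use affinity
 * bits end up packed into a small dense index.
 */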

/*
 * Save CPU state for a suspend.  This saves the CPU general purpose
 * registers, and allocates space on the kernel stack to save the CPU
 * specific registers and some other data for resume.
 *  r0 = suspend function arg0
 *  r1 = suspend function
 *  r2 = MPIDR value the resuming CPU will use
 */
ENTRY(__cpu_suspend)
	stmfd	sp!, {r4 - r11, lr}
#ifdef MULTI_CPU
	ldr	r10, =processor
	ldr	r4, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
#else
	ldr	r4, =cpu_suspend_size
#endif
	mov	r5, sp			@ current virtual SP
#ifdef CONFIG_VMAP_STACK
	@ Run the suspend code from the overflow stack so we don't have to rely
	@ on vmalloc-to-phys conversions anywhere in the arch suspend code.
	@ The original SP value captured in R5 will be restored on the way out.
	ldr_this_cpu sp, overflow_stack_ptr, r6, r7
#endif
	add	r4, r4, #12		@ Space for pgd, virt sp, phys resume fn
	sub	sp, sp, r4		@ allocate CPU state on stack
	ldr	r3, =sleep_save_sp
	stmfd	sp!, {r0, r1}		@ save suspend func arg and pointer
	ldr	r3, [r3, #SLEEP_SAVE_SP_VIRT]
	ALT_SMP(W(nop))			@ don't use adr_l inside ALT_SMP()
	ALT_UP_B(1f)
	adr_l	r0, mpidr_hash
	/* This ldmia relies on the memory layout of the mpidr_hash struct */
	ldmia	r0, {r1, r6-r8}	@ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts
	compute_mpidr_hash	r0, r6, r7, r8, r2, r1
	add	r3, r3, r0, lsl #2
1:	mov	r2, r5			@ virtual SP
	mov	r1, r4			@ size of save block
	add	r0, sp, #8		@ pointer to save block
	bl	__cpu_suspend_save
	badr	lr, cpu_suspend_abort
	ldmfd	sp!, {r0, pc}		@ call suspend fn
ENDPROC(__cpu_suspend)
	.ltorg
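/*
 * Layout assumed by the ldmia above (and the one in cpu_resume below),
 * sketched from struct mpidr_hash in <asm/smp_plat.h>: the mask word
 * comes first, immediately followed by the three affinity-level shifts.
 *
 *	struct mpidr_hash {
 *		u32	mask;
 *		u32	shift_aff[3];
 *		u32	bits;
 *	};
 */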

cpu_suspend_abort:
	ldmia	sp!, {r1 - r3}		@ pop phys pgd, virt SP, phys resume fn
	teq	r0, #0
	moveq	r0, #1			@ force non-zero value
	mov	sp, r2
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_suspend_abort)

/*
 * r0 = control register value
 */
	.align	5
	.pushsection	.idmap.text,"ax"
ENTRY(cpu_resume_mmu)
	ldr	r3, =cpu_resume_after_mmu
	instr_sync
	mcr	p15, 0, r0, c1, c0, 0	@ turn on MMU, I-cache, etc
	mrc	p15, 0, r0, c0, c0, 0	@ read id reg
	instr_sync
	mov	r0, r0			@ NOP padding while the MMU change
	mov	r0, r0			@ takes effect
	ret	r3			@ jump to virtual address
ENDPROC(cpu_resume_mmu)
	.popsection

cpu_resume_after_mmu:
#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE)
	@ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
	@ as the ID map does not cover the vmalloc region.
	mrc	p15, 0, ip, c2, c0, 1	@ read TTBR1
	mcr	p15, 0, ip, c2, c0, 0	@ set TTBR0
	instr_sync
#endif
	bl	cpu_init		@ restore the und/abt/irq banked regs
#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
	@ Instrumented C code on the way into idle left stale poison on the
	@ stack shadow; clear everything below the restored SP before any
	@ further instrumented calls (see the commit message above).
	mov	r0, sp
	bl	kasan_unpoison_task_stack_below
#endif
	mov	r0, #0			@ return zero on success
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_resume_after_mmu)

	.text
	.align

#ifdef CONFIG_MCPM
	.arm
THUMB(	.thumb			)
ENTRY(cpu_resume_no_hyp)
ARM_BE8(setend be)			@ ensure we are in BE mode
	b	no_hyp
#endif

#ifdef CONFIG_MMU
	.arm
ENTRY(cpu_resume_arm)
 THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
#endif

ENTRY(cpu_resume)
ARM_BE8(setend be)			@ ensure we are in BE mode
#ifdef CONFIG_ARM_VIRT_EXT
	bl	__hyp_stub_install_secondary
#endif
	safe_svcmode_maskall r1
no_hyp:
	mov	r1, #0
	ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
	ALT_UP_B(1f)
	adr_l	r2, mpidr_hash		@ r2 = struct mpidr_hash phys address

	/*
	 * This ldmia relies on the memory layout of struct mpidr_hash.
	 */
	ldmia	r2, { r3-r6 }	@ r3 = mpidr mask (r4,r5,r6) = l[0,1,2] shifts
	compute_mpidr_hash	r1, r4, r5, r6, r0, r3
1:
	ldr_l	r0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
	ldr	r0, [r0, r1, lsl #2]

	@ load phys pgd, stack, resume fn
  ARM(	ldmia	r0!, {r1, sp, pc}	)
THUMB(	ldmia	r0!, {r1, r2, r3}	)
THUMB(	mov	sp, r2			)
THUMB(	bx	r3			)
ENDPROC(cpu_resume)

#ifdef CONFIG_MMU
ENDPROC(cpu_resume_arm)
#endif
#ifdef CONFIG_MCPM
ENDPROC(cpu_resume_no_hyp)
#endif

	.data
	.align	2
	.type	sleep_save_sp, #object
ENTRY(sleep_save_sp)
	.space	SLEEP_SAVE_SP_SZ	@ struct sleep_save_sp
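For reference, a C sketch of the data behind sleep_save_sp and the indexed load in cpu_resume above. The member names follow struct sleep_save_sp in arch/arm/include/asm/suspend.h (from which the SLEEP_SAVE_SP_* asm-offsets are generated); the helper itself is illustrative, not kernel source:

	struct sleep_save_sp {
		u32	*save_ptr_stash;	/* virtual; read via SLEEP_SAVE_SP_VIRT */
		u32	save_ptr_stash_phys;	/* physical; read via SLEEP_SAVE_SP_PHYS */
	};

	/*
	 * Equivalent of 'ldr r0, [r0, r1, lsl #2]' in cpu_resume: index the
	 * stash by the MPIDR hash to get the physical address of this CPU's
	 * save block ({pgd, virtual SP, physical resume fn}, then CPU state).
	 */
	static u32 save_block_phys(const u32 *stash_phys, u32 mpidr_hash)
	{
		return stash_phys[mpidr_hash];
	}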