mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2024-12-29 09:13:38 +00:00
cb8a2ef084
The kernel CONFIG_UNWINDER_ORC option enables the ORC unwinder, which is similar in concept to a DWARF unwinder. The difference is that the format of the ORC data is much simpler than DWARF, which in turn allows the ORC unwinder to be much simpler and faster. The ORC data consists of unwind tables which are generated by objtool. After analyzing all the code paths of a .o file, it determines information about the stack state at each instruction address in the file and outputs that information to the .orc_unwind and .orc_unwind_ip sections. The per-object ORC sections are combined at link time and are sorted and post-processed at boot time. The unwinder uses the resulting data to correlate instruction addresses with their stack states at run time. Most of the logic is similar to x86; in order to get ra info before ra is saved onto the stack, add ra_reg and ra_offset into orc_entry. At the same time, modify some arch-specific code to silence the objtool warnings. Co-developed-by: Jinyang He <hejinyang@loongson.cn> Signed-off-by: Jinyang He <hejinyang@loongson.cn> Co-developed-by: Youling Tang <tangyouling@loongson.cn> Signed-off-by: Youling Tang <tangyouling@loongson.cn> Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
210 lines
3.6 KiB
ArmAsm
210 lines
3.6 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/export.h>
|
|
#include <asm/alternative-asm.h>
|
|
#include <asm/asm.h>
|
|
#include <asm/asmmacro.h>
|
|
#include <asm/asm-extable.h>
|
|
#include <asm/cpu.h>
|
|
#include <asm/regdef.h>
|
|
#include <asm/unwind_hints.h>
|
|
|
|
SYM_FUNC_START(__clear_user)
|
|
/*
|
|
* Some CPUs support hardware unaligned access
|
|
*/
|
|
ALTERNATIVE "b __clear_user_generic", \
|
|
"b __clear_user_fast", CPU_FEATURE_UAL
|
|
SYM_FUNC_END(__clear_user)
|
|
|
|
EXPORT_SYMBOL(__clear_user)
|
|
|
|
/*
|
|
* unsigned long __clear_user_generic(void *addr, size_t size)
|
|
*
|
|
* a0: addr
|
|
* a1: size
|
|
*/
|
|
SYM_FUNC_START(__clear_user_generic)
|
|
beqz a1, 2f
|
|
|
|
1: st.b zero, a0, 0
|
|
addi.d a0, a0, 1
|
|
addi.d a1, a1, -1
|
|
bgtz a1, 1b
|
|
|
|
2: move a0, a1
|
|
jr ra
|
|
|
|
_asm_extable 1b, 2b
|
|
SYM_FUNC_END(__clear_user_generic)
|
|
|
|
/*
|
|
* unsigned long __clear_user_fast(void *addr, unsigned long size)
|
|
*
|
|
* a0: addr
|
|
* a1: size
|
|
*/
|
|
SYM_FUNC_START(__clear_user_fast)
|
|
sltui t0, a1, 9
|
|
bnez t0, .Lsmall
|
|
|
|
add.d a2, a0, a1
|
|
0: st.d zero, a0, 0
|
|
|
|
/* align up address */
|
|
addi.d a0, a0, 8
|
|
bstrins.d a0, zero, 2, 0
|
|
|
|
addi.d a3, a2, -64
|
|
bgeu a0, a3, .Llt64
|
|
|
|
/* set 64 bytes at a time */
|
|
.Lloop64:
|
|
1: st.d zero, a0, 0
|
|
2: st.d zero, a0, 8
|
|
3: st.d zero, a0, 16
|
|
4: st.d zero, a0, 24
|
|
5: st.d zero, a0, 32
|
|
6: st.d zero, a0, 40
|
|
7: st.d zero, a0, 48
|
|
8: st.d zero, a0, 56
|
|
addi.d a0, a0, 64
|
|
bltu a0, a3, .Lloop64
|
|
|
|
/* set the remaining bytes */
|
|
.Llt64:
|
|
addi.d a3, a2, -32
|
|
bgeu a0, a3, .Llt32
|
|
9: st.d zero, a0, 0
|
|
10: st.d zero, a0, 8
|
|
11: st.d zero, a0, 16
|
|
12: st.d zero, a0, 24
|
|
addi.d a0, a0, 32
|
|
|
|
.Llt32:
|
|
addi.d a3, a2, -16
|
|
bgeu a0, a3, .Llt16
|
|
13: st.d zero, a0, 0
|
|
14: st.d zero, a0, 8
|
|
addi.d a0, a0, 16
|
|
|
|
.Llt16:
|
|
addi.d a3, a2, -8
|
|
bgeu a0, a3, .Llt8
|
|
15: st.d zero, a0, 0
|
|
addi.d a0, a0, 8
|
|
|
|
.Llt8:
|
|
16: st.d zero, a2, -8
|
|
|
|
/* return */
|
|
move a0, zero
|
|
jr ra
|
|
|
|
.align 4
|
|
.Lsmall:
|
|
pcaddi t0, 4
|
|
slli.d a2, a1, 4
|
|
add.d t0, t0, a2
|
|
jr t0
|
|
|
|
.align 4
|
|
move a0, zero
|
|
jr ra
|
|
|
|
.align 4
|
|
17: st.b zero, a0, 0
|
|
move a0, zero
|
|
jr ra
|
|
|
|
.align 4
|
|
18: st.h zero, a0, 0
|
|
move a0, zero
|
|
jr ra
|
|
|
|
.align 4
|
|
19: st.h zero, a0, 0
|
|
20: st.b zero, a0, 2
|
|
move a0, zero
|
|
jr ra
|
|
|
|
.align 4
|
|
21: st.w zero, a0, 0
|
|
move a0, zero
|
|
jr ra
|
|
|
|
.align 4
|
|
22: st.w zero, a0, 0
|
|
23: st.b zero, a0, 4
|
|
move a0, zero
|
|
jr ra
|
|
|
|
.align 4
|
|
24: st.w zero, a0, 0
|
|
25: st.h zero, a0, 4
|
|
move a0, zero
|
|
jr ra
|
|
|
|
.align 4
|
|
26: st.w zero, a0, 0
|
|
27: st.w zero, a0, 3
|
|
move a0, zero
|
|
jr ra
|
|
|
|
.align 4
|
|
28: st.d zero, a0, 0
|
|
move a0, zero
|
|
jr ra
|
|
|
|
/* fixup and ex_table */
|
|
.Llarge_fixup:
|
|
sub.d a1, a2, a0
|
|
|
|
.Lsmall_fixup:
|
|
29: st.b zero, a0, 0
|
|
addi.d a0, a0, 1
|
|
addi.d a1, a1, -1
|
|
bgt a1, zero, 29b
|
|
|
|
.Lexit:
|
|
move a0, a1
|
|
jr ra
|
|
|
|
_asm_extable 0b, .Lsmall_fixup
|
|
_asm_extable 1b, .Llarge_fixup
|
|
_asm_extable 2b, .Llarge_fixup
|
|
_asm_extable 3b, .Llarge_fixup
|
|
_asm_extable 4b, .Llarge_fixup
|
|
_asm_extable 5b, .Llarge_fixup
|
|
_asm_extable 6b, .Llarge_fixup
|
|
_asm_extable 7b, .Llarge_fixup
|
|
_asm_extable 8b, .Llarge_fixup
|
|
_asm_extable 9b, .Llarge_fixup
|
|
_asm_extable 10b, .Llarge_fixup
|
|
_asm_extable 11b, .Llarge_fixup
|
|
_asm_extable 12b, .Llarge_fixup
|
|
_asm_extable 13b, .Llarge_fixup
|
|
_asm_extable 14b, .Llarge_fixup
|
|
_asm_extable 15b, .Llarge_fixup
|
|
_asm_extable 16b, .Llarge_fixup
|
|
_asm_extable 17b, .Lexit
|
|
_asm_extable 18b, .Lsmall_fixup
|
|
_asm_extable 19b, .Lsmall_fixup
|
|
_asm_extable 20b, .Lsmall_fixup
|
|
_asm_extable 21b, .Lsmall_fixup
|
|
_asm_extable 22b, .Lsmall_fixup
|
|
_asm_extable 23b, .Lsmall_fixup
|
|
_asm_extable 24b, .Lsmall_fixup
|
|
_asm_extable 25b, .Lsmall_fixup
|
|
_asm_extable 26b, .Lsmall_fixup
|
|
_asm_extable 27b, .Lsmall_fixup
|
|
_asm_extable 28b, .Lsmall_fixup
|
|
_asm_extable 29b, .Lexit
|
|
SYM_FUNC_END(__clear_user_fast)
|
|
|
|
STACK_FRAME_NON_STANDARD __clear_user_fast
|