x86/ibt,ftrace: Make function-graph play nice
The return trampoline must not use an indirect branch to return; while that preserves the RSB, it is fundamentally incompatible with IBT, since return addresses do not carry ENDBR. Instead, use a retpoline-like ROP gadget that defeats IBT while not unbalancing the RSB.

And since ftrace_stub is no longer a plain RET, don't use it to copy the RET from. Since RET is a trivial instruction, poke it directly.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154318.347296408@infradead.org
parent d15cb3dab1
commit e52fc2cf3f
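To illustrate the trick outside the kernel: the gadget pushes a return address with CALL, overwrites it with the real target, and lets RET do the "indirect jump", so the CALL/RET pair stays balanced in the RSB and IBT never sees an indirect JMP. Below is a minimal userspace sketch of the same shape as the .Ldo_rop gadget in the diff, assuming x86-64 with GCC or Clang; jump_via_ret() and landing() are hypothetical names, not kernel code.

#include <stdio.h>
#include <stdlib.h>

static void landing(void)
{
	puts("reached via RET, not an indirect JMP");
	exit(0);	/* do not return: the caller's frame was bypassed */
}

static void jump_via_ret(void (*target)(void))
{
	/*
	 * CALL pushes a return address, we overwrite it with the real
	 * target, and RET pops it and jumps there. No indirect JMP, so
	 * IBT does not demand an ENDBR at the target, and the CALL/RET
	 * pair keeps the return-stack predictor balanced.
	 */
	asm volatile(
		"call	1f\n\t"
		"int3\n"		/* never executed */
	"1:\n\t"
		"mov	%0, (%%rsp)\n\t"
		"ret"
		: : "r" (target));
}

int main(void)
{
	jump_via_ret(landing);	/* transfers control; landing() exits */
}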
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -316,12 +316,12 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 	unsigned long offset;
 	unsigned long npages;
 	unsigned long size;
-	unsigned long retq;
 	unsigned long *ptr;
 	void *trampoline;
 	void *ip;
 	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
 	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
+	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
 	union ftrace_op_code_union op_ptr;
 	int ret;
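Given the opcode definitions in arch/x86/include/asm/text-patching.h (RET_INSN_OPCODE is 0xc3, INT3_INSN_OPCODE is 0xcc), the new initializer is just the encoded "ret; int3" byte pair; the trailing int3 traps any straight-line fall-through past the RET. For reference:

	/* What the new retq[] above expands to: */
	unsigned const char retq[] = { 0xc3 /* ret */, 0xcc /* int3 */ };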
@@ -359,12 +359,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 		goto fail;
 
 	ip = trampoline + size;
-
-	/* The trampoline ends with ret(q) */
-	retq = (unsigned long)ftrace_stub;
-	ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
-	if (WARN_ON(ret < 0))
-		goto fail;
+	memcpy(ip, retq, RET_SIZE);
 
 	/* No need to test direct calls on created trampolines */
 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -176,10 +176,10 @@ SYM_FUNC_END(ftrace_caller);
 SYM_FUNC_START(ftrace_epilogue)
 /*
  * This is weak to keep gas from relaxing the jumps.
- * It is also used to copy the RET for trampolines.
  */
 SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
 	UNWIND_HINT_FUNC
+	ENDBR
 	RET
 SYM_FUNC_END(ftrace_epilogue)
 
@@ -284,6 +284,7 @@ SYM_FUNC_START(__fentry__)
 	jnz trace
 
 SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
+	ENDBR
 	RET
 
 trace:
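The two ENDBR additions make ftrace_stub a valid indirect-branch target under IBT, which it must be because the stub is reachable through a function pointer (in the kernel, ftrace_trace_function defaults to ftrace_stub). A reduced sketch of that pattern, using hypothetical names rather than the kernel's types:

/* Reduced illustration: a default handler reached via an indirect
 * call must start with ENDBR once IBT is enforced. */
typedef void (*trace_fn_t)(unsigned long ip, unsigned long parent_ip);

static void stub(unsigned long ip, unsigned long parent_ip)
{
	/* default: trace nothing */
}

static trace_fn_t trace_function = stub;

static void fentry_hook(unsigned long ip, unsigned long parent_ip)
{
	trace_function(ip, parent_ip);	/* indirect call: lands on the stub */
}

int main(void)
{
	fentry_hook(0, 0);
	return 0;
}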
@@ -307,7 +308,7 @@ EXPORT_SYMBOL(__fentry__)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 SYM_FUNC_START(return_to_handler)
-	subq $24, %rsp
+	subq $16, %rsp
 
 	/* Save the return values */
 	movq %rax, (%rsp)
@@ -319,7 +320,19 @@ SYM_FUNC_START(return_to_handler)
 	movq %rax, %rdi
 	movq 8(%rsp), %rdx
 	movq (%rsp), %rax
-	addq $24, %rsp
-	JMP_NOSPEC rdi
+
+	addq $16, %rsp
+	/*
+	 * Jump back to the old return address. This cannot be JMP_NOSPEC rdi
+	 * since IBT would demand that contain ENDBR, which simply isn't so for
+	 * return addresses. Use a retpoline here to keep the RSB balanced.
+	 */
+	ANNOTATE_INTRA_FUNCTION_CALL
+	call .Ldo_rop
+	int3
+.Ldo_rop:
+	mov %rdi, (%rsp)
+	UNWIND_HINT_FUNC
+	RET
 SYM_FUNC_END(return_to_handler)
 #endif
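Step by step, the new exit path keeps both the stack pointer and the return-stack predictor balanced. These are the same instructions as in the hunk above, with stack-effect annotations added (the annotations are mine, not part of the patch; %rdi holds the original return address recovered by ftrace_return_to_handler):

	addq	$16, %rsp		# drop the rax/rdx save area; %rsp is back
					# where it was when the traced function
					# "returned" into return_to_handler
	ANNOTATE_INTRA_FUNCTION_CALL
	call	.Ldo_rop		# pushes the address of the int3 below,
					# and one entry onto the RSB
	int3				# never executed; stops straight-line speculation
.Ldo_rop:
	mov	%rdi, (%rsp)		# replace the pushed address with the
					# original return address
	UNWIND_HINT_FUNC
	RET				# pops it and jumps there; this RET consumes
					# the RSB entry pushed by the CALL above,
					# so nothing is left unbalanced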
|
Loading…
x
Reference in New Issue
Block a user