s390/tracing: Enable HAVE_FTRACE_GRAPH_FUNC
Add ftrace_graph_func() which is required for fprobe to access registers.
This also eliminates the need for calling prepare_ftrace_return() from
ftrace_caller().

Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Cc: Florent Revest <revest@chromium.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: bpf <bpf@vger.kernel.org>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Alan Maguire <alan.maguire@oracle.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/173519002875.391279.7060964632119674159.stgit@devnote2
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
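Background for the change below (an aside, not part of the commit message): with HAVE_FTRACE_GRAPH_FUNC, the function-graph entry hook is an ordinary ftrace callback that receives a struct ftrace_regs, which is what lets fprobe and BPF read the traced function's register state. A minimal, hypothetical sketch of such a callback follows; example_graph_entry and its pr_debug output are illustrative names, not code from this commit:

#include <linux/ftrace.h>
#include <linux/printk.h>

/*
 * Illustrative sketch only: the shape of a graph-entry callback once
 * HAVE_FTRACE_GRAPH_FUNC is available. Register state is reachable
 * through the ftrace_regs_get_*() accessors instead of a hand-written
 * assembly stub.
 */
static void example_graph_entry(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long entry_ip = ftrace_regs_get_instruction_pointer(fregs);
	unsigned long sp = ftrace_regs_get_stack_pointer(fregs);

	pr_debug("entered %pS (sp=%lx), called from %pS\n",
		 (void *)entry_ip, sp, (void *)parent_ip);
}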
This commit is contained in:
parent a762e9267d
commit 7495e179b4
@@ -190,6 +190,7 @@ config S390
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select HAVE_GUP_FAST
 	select HAVE_FENTRY
+	select HAVE_FTRACE_GRAPH_FUNC
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_ARG_ACCESS_API
 	select HAVE_FUNCTION_ERROR_INJECTION
@@ -39,6 +39,7 @@ struct dyn_arch_ftrace { };
 
 struct module;
 struct dyn_ftrace;
+struct ftrace_ops;
 
 bool ftrace_need_init_nop(void);
 #define ftrace_need_init_nop ftrace_need_init_nop
@@ -122,6 +123,10 @@ static inline bool arch_syscall_match_sym_name(const char *sym,
 	return !strcmp(sym + 7, name) || !strcmp(sym + 8, name);
 }
 
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+		       struct ftrace_ops *op, struct ftrace_regs *fregs);
+#define ftrace_graph_func ftrace_graph_func
+
 #endif /* __ASSEMBLY__ */
 
 #ifdef CONFIG_FUNCTION_TRACER
@@ -41,7 +41,6 @@ void do_restart(void *arg);
 void __init startup_init(void);
 void die(struct pt_regs *regs, const char *str);
 int setup_profiling_timer(unsigned int multiplier);
-unsigned long prepare_ftrace_return(unsigned long parent, unsigned long sp, unsigned long ip);
 
 struct s390_mmap_arg_struct;
 struct fadvise64_64_args;
@@ -261,43 +261,23 @@ void ftrace_arch_code_modify_post_process(void)
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-/*
- * Hook the return address and push it in the stack of return addresses
- * in current thread info.
- */
-unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
-				    unsigned long ip)
+
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+		       struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
+	unsigned long *parent = &arch_ftrace_regs(fregs)->regs.gprs[14];
+	int bit;
+
 	if (unlikely(ftrace_graph_is_dead()))
-		goto out;
+		return;
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
-		goto out;
-	ip -= MCOUNT_INSN_SIZE;
-	if (!function_graph_enter(ra, ip, 0, (void *) sp))
-		ra = (unsigned long) return_to_handler;
-out:
-	return ra;
-}
-NOKPROBE_SYMBOL(prepare_ftrace_return);
-
-/*
- * Patch the kernel code at ftrace_graph_caller location. The instruction
- * there is branch relative on condition. To enable the ftrace graph code
- * block, we simply patch the mask field of the instruction to zero and
- * turn the instruction into a nop.
- * To disable the ftrace graph code the mask field will be patched to
- * all ones, which turns the instruction into an unconditional branch.
- */
-int ftrace_enable_ftrace_graph_caller(void)
-{
-	/* Expect brc 0xf,... */
-	return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
-}
-
-int ftrace_disable_ftrace_graph_caller(void)
-{
-	/* Expect brc 0x0,... */
-	return ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
+		return;
+	bit = ftrace_test_recursion_trylock(ip, *parent);
+	if (bit < 0)
+		return;
+	if (!function_graph_enter_regs(*parent, ip, 0, parent, fregs))
+		*parent = (unsigned long)&return_to_handler;
+	ftrace_test_recursion_unlock(bit);
 }
 
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
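Context for the hunk above (an aside, not from the commit): the s390 ABI keeps the caller's return address in general register 14, so &arch_ftrace_regs(fregs)->regs.gprs[14] is the saved slot the traced function will return through, and pointing it at return_to_handler is how the graph tracer intercepts the return path. A tiny userspace toy with stand-in names (toy_regs, toy_trampoline), purely illustrative, models that redirection:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the saved register block; gpr 14 is the s390 link register. */
struct toy_regs { uint64_t gprs[16]; };

/* Stand-in for return_to_handler. */
static void toy_trampoline(void) { }

int main(void)
{
	struct toy_regs regs = { .gprs = { [14] = 0x2000 } };
	uint64_t *parent = &regs.gprs[14];	/* saved return-address slot */
	uint64_t original = *parent;

	/* Models what *parent = (unsigned long)&return_to_handler does above. */
	*parent = (uint64_t)(uintptr_t)&toy_trampoline;
	printf("return address: %#llx -> %#llx\n",
	       (unsigned long long)original, (unsigned long long)*parent);
	return 0;
}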
@@ -104,17 +104,6 @@ SYM_CODE_START(ftrace_common)
 	lgr	%r3,%r14
 	la	%r5,STACK_FREGS(%r15)
 	BASR_EX	%r14,%r1
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-# The j instruction gets runtime patched to a nop instruction.
-# See ftrace_enable_ftrace_graph_caller.
-SYM_INNER_LABEL(ftrace_graph_caller, SYM_L_GLOBAL)
-	j	.Lftrace_graph_caller_end
-	lmg	%r2,%r3,(STACK_FREGS_PTREGS_GPRS+14*8)(%r15)
-	lg	%r4,(STACK_FREGS_PTREGS_PSW+8)(%r15)
-	brasl	%r14,prepare_ftrace_return
-	stg	%r2,(STACK_FREGS_PTREGS_GPRS+14*8)(%r15)
-.Lftrace_graph_caller_end:
-#endif
 	lg	%r0,(STACK_FREGS_PTREGS_PSW+8)(%r15)
 #ifdef MARCH_HAS_Z196_FEATURES
 	ltg	%r1,STACK_FREGS_PTREGS_ORIG_GPR2(%r15)