Merge ftrace/for-next

Steven Rostedt (Google) 2025-01-13 09:47:27 -05:00
commit 7132d8572b
6 changed files with 69 additions and 129 deletions

arch/powerpc/kernel/trace/ftrace.c — View File

@@ -658,7 +658,6 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
 		       struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
 	unsigned long sp = arch_ftrace_regs(fregs)->regs.gpr[1];
-	int bit;
 
 	if (unlikely(ftrace_graph_is_dead()))
 		goto out;
@@ -666,14 +665,9 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		goto out;
 
-	bit = ftrace_test_recursion_trylock(ip, parent_ip);
-	if (bit < 0)
-		goto out;
-
 	if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
 		parent_ip = ppc_function_entry(return_to_handler);
 
-	ftrace_test_recursion_unlock(bit);
 out:
 	arch_ftrace_regs(fregs)->regs.link = parent_ip;
 }

arch/powerpc/kernel/trace/ftrace_64_pg.c — View File

@@ -790,7 +790,6 @@ static unsigned long
 __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
 {
 	unsigned long return_hooker;
-	int bit;
 
 	if (unlikely(ftrace_graph_is_dead()))
 		goto out;
@@ -798,16 +797,11 @@ __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		goto out;
 
-	bit = ftrace_test_recursion_trylock(ip, parent);
-	if (bit < 0)
-		goto out;
-
 	return_hooker = ppc_function_entry(return_to_handler);
 
 	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
 		parent = return_hooker;
 
-	ftrace_test_recursion_unlock(bit);
 out:
 	return parent;
 }

arch/x86/kernel/ftrace.c — View File

@@ -615,7 +615,6 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
 			   unsigned long frame_pointer)
 {
 	unsigned long return_hooker = (unsigned long)&return_to_handler;
-	int bit;
 
 	/*
 	 * When resuming from suspend-to-ram, this function can be indirectly
@@ -635,14 +634,8 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
-	bit = ftrace_test_recursion_trylock(ip, *parent);
-	if (bit < 0)
-		return;
-
 	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
 		*parent = return_hooker;
-
-	ftrace_test_recursion_unlock(bit);
 }
 
 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

kernel/trace/fgraph.c — View File

@@ -650,8 +650,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
 	struct ftrace_graph_ent trace;
 	unsigned long bitmap = 0;
 	int offset;
+	int bit;
 	int i;
 
+	bit = ftrace_test_recursion_trylock(func, ret);
+	if (bit < 0)
+		return -EBUSY;
+
 	trace.func = func;
 	trace.depth = ++current->curr_ret_depth;
 
@@ -697,12 +702,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
 	 * flag, set that bit always.
 	 */
 	set_bitmap(current, offset, bitmap | BIT(0));
-
+	ftrace_test_recursion_unlock(bit);
 	return 0;
 out_ret:
 	current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
 out:
 	current->curr_ret_depth--;
+	ftrace_test_recursion_unlock(bit);
 	return -EBUSY;
 }
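Editor's note: the arch hunks above (powerpc and x86) and this fgraph.c hunk are two halves of one cleanup: the ftrace_test_recursion_trylock()/ftrace_test_recursion_unlock() pair moves out of every architecture's graph-entry hook and into the generic function_graph_enter(), so recursion protection is done once, in one place. A minimal user-space sketch of the trylock idea itself, assuming a simple per-thread flag (all names here are invented for illustration; the kernel's version also encodes the preemption context into the bit it returns):

#include <stdio.h>

/* Hypothetical stand-ins for ftrace_test_recursion_trylock()/unlock(). */
static __thread int in_tracer;

static int recursion_trylock(void)
{
	if (in_tracer)
		return -1;	/* already inside the tracer: refuse */
	in_tracer = 1;
	return 0;		/* token handed back to the unlock */
}

static void recursion_unlock(void)
{
	in_tracer = 0;
}

static void trace_hook(const char *func)
{
	if (recursion_trylock() < 0)
		return;		/* a nested hook would recurse forever */
	printf("enter %s\n", func);	/* printf itself could be hooked */
	recursion_unlock();
}

int main(void)
{
	trace_hook("main");
	return 0;
}

Centralizing the lock is why every arch hook above simply loses its trylock/unlock lines rather than replacing them with something else.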

kernel/trace/ftrace.c — View File

@@ -536,24 +536,21 @@ static int function_stat_show(struct seq_file *m, void *v)
 {
 	struct ftrace_profile *rec = v;
 	char str[KSYM_SYMBOL_LEN];
-	int ret = 0;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	static struct trace_seq s;
 	unsigned long long avg;
 	unsigned long long stddev;
 #endif
-	mutex_lock(&ftrace_profile_lock);
+	guard(mutex)(&ftrace_profile_lock);
 
 	/* we raced with function_profile_reset() */
-	if (unlikely(rec->counter == 0)) {
-		ret = -EBUSY;
-		goto out;
-	}
+	if (unlikely(rec->counter == 0))
+		return -EBUSY;
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	avg = div64_ul(rec->time, rec->counter);
 	if (tracing_thresh && (avg < tracing_thresh))
-		goto out;
+		return 0;
 #endif
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
@@ -590,10 +587,8 @@ static int function_stat_show(struct seq_file *m, void *v)
 	trace_print_seq(m, &s);
 #endif
 	seq_putc(m, '\n');
-out:
-	mutex_unlock(&ftrace_profile_lock);
 
-	return ret;
+	return 0;
 }
 
 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -789,27 +784,24 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
 {
 	struct ftrace_profile_stat *stat;
 	struct ftrace_profile *rec;
-	unsigned long flags;
 
 	if (!ftrace_profile_enabled)
 		return;
 
-	local_irq_save(flags);
+	guard(preempt_notrace)();
 
 	stat = this_cpu_ptr(&ftrace_profile_stats);
 	if (!stat->hash || !ftrace_profile_enabled)
-		goto out;
+		return;
 
 	rec = ftrace_find_profiled_func(stat, ip);
 	if (!rec) {
 		rec = ftrace_profile_alloc(stat, ip);
 		if (!rec)
-			goto out;
+			return;
 	}
 
 	rec->counter++;
- out:
-	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -856,19 +848,19 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
 	unsigned long long calltime;
 	unsigned long long rettime = trace_clock_local();
 	struct ftrace_profile *rec;
-	unsigned long flags;
 	int size;
 
-	local_irq_save(flags);
+	guard(preempt_notrace)();
+
 	stat = this_cpu_ptr(&ftrace_profile_stats);
 	if (!stat->hash || !ftrace_profile_enabled)
-		goto out;
+		return;
 
 	profile_data = fgraph_retrieve_data(gops->idx, &size);
 
 	/* If the calltime was zero'd ignore it */
 	if (!profile_data || !profile_data->calltime)
-		goto out;
+		return;
 
 	calltime = rettime - profile_data->calltime;
 
@@ -896,9 +888,6 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
 		rec->time += calltime;
 		rec->time_squared += calltime * calltime;
 	}
-
- out:
-	local_irq_restore(flags);
 }
 
 static struct fgraph_ops fprofiler_ops = {
@@ -950,20 +939,16 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
 	val = !!val;
 
-	mutex_lock(&ftrace_profile_lock);
+	guard(mutex)(&ftrace_profile_lock);
 	if (ftrace_profile_enabled ^ val) {
 		if (val) {
 			ret = ftrace_profile_init();
-			if (ret < 0) {
-				cnt = ret;
-				goto out;
-			}
+			if (ret < 0)
+				return ret;
 
 			ret = register_ftrace_profiler();
-			if (ret < 0) {
-				cnt = ret;
-				goto out;
-			}
+			if (ret < 0)
+				return ret;
 			ftrace_profile_enabled = 1;
 		} else {
 			ftrace_profile_enabled = 0;
@@ -974,8 +959,6 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
 			unregister_ftrace_profiler();
 		}
 	}
-out:
-	mutex_unlock(&ftrace_profile_lock);
 
 	*ppos += cnt;
 
@@ -1675,14 +1658,12 @@ unsigned long ftrace_location(unsigned long ip)
 	loc = ftrace_location_range(ip, ip);
 	if (!loc) {
 		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
-			goto out;
+			return 0;
 
 		/* map sym+0 to __fentry__ */
 		if (!offset)
 			loc = ftrace_location_range(ip, ip + size - 1);
 	}
-
-out:
 	return loc;
 }
 
@@ -2077,7 +2058,7 @@ rollback:
 		continue;
 
 		if (rec == end)
-			goto err_out;
+			return -EBUSY;
 
 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
@@ -2090,7 +2071,6 @@ rollback:
 			rec->flags |= FTRACE_FL_IPMODIFY;
 	} while_for_each_ftrace_rec();
 
-err_out:
 	return -EBUSY;
 }
 
@@ -5619,20 +5599,15 @@ static DEFINE_MUTEX(ftrace_cmd_mutex);
 __init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
 	struct ftrace_func_command *p;
-	int ret = 0;
 
-	mutex_lock(&ftrace_cmd_mutex);
+	guard(mutex)(&ftrace_cmd_mutex);
 	list_for_each_entry(p, &ftrace_commands, list) {
-		if (strcmp(cmd->name, p->name) == 0) {
-			ret = -EBUSY;
-			goto out_unlock;
-		}
+		if (strcmp(cmd->name, p->name) == 0)
+			return -EBUSY;
 	}
 	list_add(&cmd->list, &ftrace_commands);
-out_unlock:
-	mutex_unlock(&ftrace_cmd_mutex);
 
-	return ret;
+	return 0;
 }
 
 /*
@@ -5642,20 +5617,17 @@ __init int register_ftrace_command(struct ftrace_func_command *cmd)
 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
 {
 	struct ftrace_func_command *p, *n;
-	int ret = -ENODEV;
 
-	mutex_lock(&ftrace_cmd_mutex);
+	guard(mutex)(&ftrace_cmd_mutex);
 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
 		if (strcmp(cmd->name, p->name) == 0) {
-			ret = 0;
 			list_del_init(&p->list);
-			goto out_unlock;
+			return 0;
 		}
 	}
-out_unlock:
-	mutex_unlock(&ftrace_cmd_mutex);
 
-	return ret;
+	return -ENODEV;
 }
 
 static int ftrace_process_regex(struct ftrace_iterator *iter,
@@ -5665,7 +5637,7 @@ static int ftrace_process_regex(struct ftrace_iterator *iter,
 	struct trace_array *tr = iter->ops->private;
 	char *func, *command, *next = buff;
 	struct ftrace_func_command *p;
-	int ret = -EINVAL;
+	int ret;
 
 	func = strsep(&next, ":");
 
@@ -5682,17 +5654,14 @@ static int ftrace_process_regex(struct ftrace_iterator *iter,
 	command = strsep(&next, ":");
 
-	mutex_lock(&ftrace_cmd_mutex);
-	list_for_each_entry(p, &ftrace_commands, list) {
-		if (strcmp(p->name, command) == 0) {
-			ret = p->func(tr, hash, func, command, next, enable);
-			goto out_unlock;
-		}
-	}
- out_unlock:
-	mutex_unlock(&ftrace_cmd_mutex);
+	guard(mutex)(&ftrace_cmd_mutex);
 
-	return ret;
+	list_for_each_entry(p, &ftrace_commands, list) {
+		if (strcmp(p->name, command) == 0)
+			return p->func(tr, hash, func, command, next, enable);
+	}
+
+	return -EINVAL;
 }
 
 static ssize_t
@@ -5726,12 +5695,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 					   parser->idx, enable);
 		trace_parser_clear(parser);
 		if (ret < 0)
-			goto out;
+			return ret;
 	}
 
-	ret = read;
-out:
-	return ret;
+	return read;
 }
 
 ssize_t
@@ -8291,7 +8258,7 @@ pid_write(struct file *filp, const char __user *ubuf,
 	if (!cnt)
 		return 0;
 
-	mutex_lock(&ftrace_lock);
+	guard(mutex)(&ftrace_lock);
 
 	switch (type) {
 	case TRACE_PIDS:
@@ -8307,14 +8274,13 @@ pid_write(struct file *filp, const char __user *ubuf,
 					     lockdep_is_held(&ftrace_lock));
 		break;
 	default:
-		ret = -EINVAL;
 		WARN_ON_ONCE(1);
-		goto out;
+		return -EINVAL;
 	}
 
 	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
 	if (ret < 0)
-		goto out;
+		return ret;
 
 	switch (type) {
 	case TRACE_PIDS:
@@ -8343,11 +8309,8 @@ pid_write(struct file *filp, const char __user *ubuf,
 	ftrace_update_pid_func();
 	ftrace_startup_all(0);
 
-out:
-	mutex_unlock(&ftrace_lock);
-
-	if (ret > 0)
-		*ppos += ret;
+	*ppos += ret;
 
 	return ret;
 }
@@ -8750,17 +8713,17 @@ static int
 ftrace_enable_sysctl(const struct ctl_table *table, int write,
 		     void *buffer, size_t *lenp, loff_t *ppos)
 {
-	int ret = -ENODEV;
+	int ret;
 
-	mutex_lock(&ftrace_lock);
+	guard(mutex)(&ftrace_lock);
 
 	if (unlikely(ftrace_disabled))
-		goto out;
+		return -ENODEV;
 
 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
-		goto out;
+		return ret;
 
 	if (ftrace_enabled) {
@@ -8774,8 +8737,7 @@ ftrace_enable_sysctl(const struct ctl_table *table, int write,
 	} else {
 		if (is_permanent_ops_registered()) {
 			ftrace_enabled = true;
-			ret = -EBUSY;
-			goto out;
+			return -EBUSY;
 		}
 
 		/* stopping ftrace calls (just send to ftrace_stub) */
@@ -8785,9 +8747,7 @@ ftrace_enable_sysctl(const struct ctl_table *table, int write,
 	}
 
 	last_ftrace_enabled = !!ftrace_enabled;
-out:
-	mutex_unlock(&ftrace_lock);
-	return ret;
+
+	return 0;
 }
 
 static struct ctl_table ftrace_sysctls[] = {
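Editor's note: throughout kernel/trace/ftrace.c this merge replaces mutex_lock()/mutex_unlock() pairs and their out:/out_unlock: labels with guard(mutex)(...) and guard(preempt_notrace)() from <linux/cleanup.h>; the guard releases the lock (or re-enables preemption) automatically on every return path, so early returns need no goto. A rough user-space sketch of the underlying scope-guard mechanism, using the compiler's cleanup attribute with a pthread mutex (MUTEX_GUARD and the helper names are invented for this sketch, not the kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

/* Runs when the guard variable goes out of scope, on any return path. */
static void mutex_cleanup(pthread_mutex_t **lock)
{
	pthread_mutex_unlock(*lock);
}

#define CONCAT_(a, b) a##b
#define CONCAT(a, b) CONCAT_(a, b)
/* Lock now, and schedule the unlock for scope exit. */
#define MUTEX_GUARD(lock) \
	pthread_mutex_t *CONCAT(guard_, __LINE__) \
		__attribute__((cleanup(mutex_cleanup))) = \
		(pthread_mutex_lock(lock), (lock))

static int bump(int limit)
{
	MUTEX_GUARD(&demo_lock);

	if (counter >= limit)
		return -1;	/* early return: unlock still runs */
	return ++counter;	/* normal return: unlock still runs */
}

int main(void)
{
	printf("%d\n", bump(10));
	return 0;
}

This is why each converted function above can simply return -EBUSY, -EINVAL, or ret directly instead of threading the value through a shared exit label.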

kernel/trace/trace_functions_graph.c — View File

@@ -181,10 +181,9 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
 	struct trace_array *tr = gops->private;
 	struct trace_array_cpu *data;
 	struct fgraph_times *ftimes;
-	unsigned long flags;
 	unsigned int trace_ctx;
 	long disabled;
-	int ret;
+	int ret = 0;
 	int cpu;
 
 	if (*task_var & TRACE_GRAPH_NOTRACE)
@@ -235,25 +234,21 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
 	if (tracing_thresh)
 		return 1;
 
-	local_irq_save(flags);
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		trace_ctx = tracing_gen_ctx_flags(flags);
-		if (unlikely(IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
-		    tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR))) {
+	disabled = atomic_read(&data->disabled);
+	if (likely(!disabled)) {
+		trace_ctx = tracing_gen_ctx();
+		if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
+		    tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
 			unsigned long retaddr = ftrace_graph_top_ret_addr(current);
 
 			ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
-		} else
+		} else {
 			ret = __trace_graph_entry(tr, trace, trace_ctx);
-	} else {
-		ret = 0;
+		}
 	}
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+	preempt_enable_notrace();
 
 	return ret;
 }
@@ -320,7 +315,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 	struct trace_array *tr = gops->private;
 	struct trace_array_cpu *data;
 	struct fgraph_times *ftimes;
-	unsigned long flags;
 	unsigned int trace_ctx;
 	long disabled;
 	int size;
@@ -341,16 +335,15 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 	trace->calltime = ftimes->calltime;
 
-	local_irq_save(flags);
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		trace_ctx = tracing_gen_ctx_flags(flags);
+	disabled = atomic_read(&data->disabled);
+	if (likely(!disabled)) {
+		trace_ctx = tracing_gen_ctx();
 		__trace_graph_return(tr, trace, trace_ctx);
 	}
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+	preempt_enable_notrace();
 }
 
 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
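Editor's note: the two trace_functions_graph.c hunks change the hot path from local_irq_save() plus an atomic_inc_return()/atomic_dec() pair on data->disabled to preempt_disable_notrace() plus a plain atomic_read(): the per-CPU counter is now only read on each event, and disabling preemption keeps the task on its CPU for the duration. A user-space approximation of that before/after difference on the event path (percpu_disabled and both function names are stand-ins for this sketch, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the kernel's per-CPU data->disabled gate. */
static _Atomic int percpu_disabled;
static long events;

static void record_event_old(void)
{
	/* old scheme: atomic read-modify-write on every event */
	if (atomic_fetch_add(&percpu_disabled, 1) == 0)
		events++;
	atomic_fetch_sub(&percpu_disabled, 1);
}

static void record_event_new(void)
{
	/* new scheme: plain atomic load; writers flip the gate rarely */
	if (atomic_load(&percpu_disabled) == 0)
		events++;
}

int main(void)
{
	record_event_old();
	record_event_new();
	printf("events=%ld\n", events);
	return 0;
}

The same reasoning removes tracing_gen_ctx_flags(flags) in favor of tracing_gen_ctx(), since there is no saved interrupt state left to pass along.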