Merge ftrace/for-next
commit 7132d8572b
@@ -658,7 +658,6 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                        struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
         unsigned long sp = arch_ftrace_regs(fregs)->regs.gpr[1];
-        int bit;
 
         if (unlikely(ftrace_graph_is_dead()))
                 goto out;
@@ -666,14 +665,9 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
         if (unlikely(atomic_read(&current->tracing_graph_pause)))
                 goto out;
 
-        bit = ftrace_test_recursion_trylock(ip, parent_ip);
-        if (bit < 0)
-                goto out;
-
         if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
                 parent_ip = ppc_function_entry(return_to_handler);
 
-        ftrace_test_recursion_unlock(bit);
 out:
         arch_ftrace_regs(fregs)->regs.link = parent_ip;
 }
@@ -790,7 +790,6 @@ static unsigned long
 __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
 {
         unsigned long return_hooker;
-        int bit;
 
         if (unlikely(ftrace_graph_is_dead()))
                 goto out;
@@ -798,16 +797,11 @@ __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp
         if (unlikely(atomic_read(&current->tracing_graph_pause)))
                 goto out;
 
-        bit = ftrace_test_recursion_trylock(ip, parent);
-        if (bit < 0)
-                goto out;
-
         return_hooker = ppc_function_entry(return_to_handler);
 
         if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
                 parent = return_hooker;
 
-        ftrace_test_recursion_unlock(bit);
 out:
         return parent;
 }
@@ -615,7 +615,6 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
                            unsigned long frame_pointer)
 {
         unsigned long return_hooker = (unsigned long)&return_to_handler;
-        int bit;
 
         /*
          * When resuming from suspend-to-ram, this function can be indirectly
@@ -635,14 +634,8 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
         if (unlikely(atomic_read(&current->tracing_graph_pause)))
                 return;
 
-        bit = ftrace_test_recursion_trylock(ip, *parent);
-        if (bit < 0)
-                return;
-
         if (!function_graph_enter(*parent, ip, frame_pointer, parent))
                 *parent = return_hooker;
-
-        ftrace_test_recursion_unlock(bit);
 }
 
 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
@@ -650,8 +650,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
         struct ftrace_graph_ent trace;
         unsigned long bitmap = 0;
         int offset;
+        int bit;
         int i;
 
+        bit = ftrace_test_recursion_trylock(func, ret);
+        if (bit < 0)
+                return -EBUSY;
+
         trace.func = func;
         trace.depth = ++current->curr_ret_depth;
 
@@ -697,12 +702,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
          * flag, set that bit always.
          */
         set_bitmap(current, offset, bitmap | BIT(0));
-
+        ftrace_test_recursion_unlock(bit);
         return 0;
 out_ret:
         current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
 out:
         current->curr_ret_depth--;
+        ftrace_test_recursion_unlock(bit);
         return -EBUSY;
 }
 
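The arch-side ftrace_graph_func()/prepare_ftrace_return() hunks above drop their private ftrace_test_recursion_trylock()/unlock() pairs because the generic function_graph_enter() now takes the recursion guard itself, as the last hunk shows. Below is a minimal sketch of that guard pattern; example_graph_enter() is a hypothetical stand-in for the generic entry path, not the kernel's exact code.

#include <linux/errno.h>
#include <linux/trace_recursion.h>

static int example_graph_enter(unsigned long ret, unsigned long func)
{
        int bit;

        /* Refuse to re-enter from the same tracing context (e.g. an interrupt). */
        bit = ftrace_test_recursion_trylock(func, ret);
        if (bit < 0)
                return -EBUSY;

        /* ... push the return-stack frame and call the registered tracers ... */

        ftrace_test_recursion_unlock(bit);
        return 0;
}

With the guard owned here, an architecture hook only needs to check the return value of function_graph_enter() and patch its return address.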
@@ -536,24 +536,21 @@ static int function_stat_show(struct seq_file *m, void *v)
 {
         struct ftrace_profile *rec = v;
         char str[KSYM_SYMBOL_LEN];
-        int ret = 0;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
         static struct trace_seq s;
         unsigned long long avg;
         unsigned long long stddev;
 #endif
-        mutex_lock(&ftrace_profile_lock);
+        guard(mutex)(&ftrace_profile_lock);
 
         /* we raced with function_profile_reset() */
-        if (unlikely(rec->counter == 0)) {
-                ret = -EBUSY;
-                goto out;
-        }
+        if (unlikely(rec->counter == 0))
+                return -EBUSY;
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
         avg = div64_ul(rec->time, rec->counter);
         if (tracing_thresh && (avg < tracing_thresh))
-                goto out;
+                return 0;
 #endif
 
         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
@@ -590,10 +587,8 @@ static int function_stat_show(struct seq_file *m, void *v)
         trace_print_seq(m, &s);
 #endif
         seq_putc(m, '\n');
-out:
-        mutex_unlock(&ftrace_profile_lock);
 
-        return ret;
+        return 0;
 }
 
 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
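function_stat_show() above is the first of several conversions in this merge from mutex_lock()/mutex_unlock() plus goto-out labels to guard(mutex)(), the scope-based lock helper built on <linux/cleanup.h>: the mutex is released automatically on every return path. A small self-contained sketch under assumed names (my_lock, my_count and my_read_count are illustrative, not kernel symbols):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);
static int my_count;

static int my_read_count(void)
{
        /* Taken here, dropped automatically on every return below. */
        guard(mutex)(&my_lock);

        if (!my_count)
                return -EBUSY;  /* no goto/unlock pair needed */

        return my_count;
}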
@@ -789,27 +784,24 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
 {
         struct ftrace_profile_stat *stat;
         struct ftrace_profile *rec;
-        unsigned long flags;
 
         if (!ftrace_profile_enabled)
                 return;
 
-        local_irq_save(flags);
+        guard(preempt_notrace)();
 
         stat = this_cpu_ptr(&ftrace_profile_stats);
         if (!stat->hash || !ftrace_profile_enabled)
-                goto out;
+                return;
 
         rec = ftrace_find_profiled_func(stat, ip);
         if (!rec) {
                 rec = ftrace_profile_alloc(stat, ip);
                 if (!rec)
-                        goto out;
+                        return;
         }
 
         rec->counter++;
-out:
-        local_irq_restore(flags);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -856,19 +848,19 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
         unsigned long long calltime;
         unsigned long long rettime = trace_clock_local();
         struct ftrace_profile *rec;
-        unsigned long flags;
         int size;
 
-        local_irq_save(flags);
+        guard(preempt_notrace)();
+
         stat = this_cpu_ptr(&ftrace_profile_stats);
         if (!stat->hash || !ftrace_profile_enabled)
-                goto out;
+                return;
 
         profile_data = fgraph_retrieve_data(gops->idx, &size);
 
         /* If the calltime was zero'd ignore it */
         if (!profile_data || !profile_data->calltime)
-                goto out;
+                return;
 
         calltime = rettime - profile_data->calltime;
 
@@ -896,9 +888,6 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
                 rec->time += calltime;
                 rec->time_squared += calltime * calltime;
         }
-
-out:
-        local_irq_restore(flags);
 }
 
 static struct fgraph_ops fprofiler_ops = {
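The profiler hunks above replace local_irq_save()/local_irq_restore() with guard(preempt_notrace)(), i.e. preempt_disable_notrace() on entry and the matching enable when the scope ends, so the per-CPU statistics are protected against migration rather than against interrupts. A reduced sketch of the same pattern; my_stat, my_stats and my_profile_hit are made-up names:

#include <linux/percpu.h>
#include <linux/preempt.h>

struct my_stat {
        unsigned long hits;
};

static DEFINE_PER_CPU(struct my_stat, my_stats);

static void my_profile_hit(void)
{
        /* Disable preemption (notrace flavour) until the scope ends. */
        guard(preempt_notrace)();

        this_cpu_ptr(&my_stats)->hits++;
}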
@@ -950,20 +939,16 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
 
         val = !!val;
 
-        mutex_lock(&ftrace_profile_lock);
+        guard(mutex)(&ftrace_profile_lock);
         if (ftrace_profile_enabled ^ val) {
                 if (val) {
                         ret = ftrace_profile_init();
-                        if (ret < 0) {
-                                cnt = ret;
-                                goto out;
-                        }
+                        if (ret < 0)
+                                return ret;
 
                         ret = register_ftrace_profiler();
-                        if (ret < 0) {
-                                cnt = ret;
-                                goto out;
-                        }
+                        if (ret < 0)
+                                return ret;
                         ftrace_profile_enabled = 1;
                 } else {
                         ftrace_profile_enabled = 0;
@@ -974,8 +959,6 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
                         unregister_ftrace_profiler();
                 }
         }
-out:
-        mutex_unlock(&ftrace_profile_lock);
 
         *ppos += cnt;
 
@@ -1675,14 +1658,12 @@ unsigned long ftrace_location(unsigned long ip)
         loc = ftrace_location_range(ip, ip);
         if (!loc) {
                 if (!kallsyms_lookup_size_offset(ip, &size, &offset))
-                        goto out;
+                        return 0;
 
                 /* map sym+0 to __fentry__ */
                 if (!offset)
                         loc = ftrace_location_range(ip, ip + size - 1);
         }
-
-out:
         return loc;
 }
 
@@ -2077,7 +2058,7 @@ rollback:
                 continue;
 
                 if (rec == end)
-                        goto err_out;
+                        return -EBUSY;
 
                 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
                 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
@@ -2090,7 +2071,6 @@ rollback:
                 rec->flags |= FTRACE_FL_IPMODIFY;
         } while_for_each_ftrace_rec();
 
-err_out:
         return -EBUSY;
 }
 
@@ -5619,20 +5599,15 @@ static DEFINE_MUTEX(ftrace_cmd_mutex);
 __init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
         struct ftrace_func_command *p;
-        int ret = 0;
 
-        mutex_lock(&ftrace_cmd_mutex);
+        guard(mutex)(&ftrace_cmd_mutex);
         list_for_each_entry(p, &ftrace_commands, list) {
-                if (strcmp(cmd->name, p->name) == 0) {
-                        ret = -EBUSY;
-                        goto out_unlock;
-                }
+                if (strcmp(cmd->name, p->name) == 0)
+                        return -EBUSY;
         }
         list_add(&cmd->list, &ftrace_commands);
-out_unlock:
-        mutex_unlock(&ftrace_cmd_mutex);
 
-        return ret;
+        return 0;
 }
 
 /*
@@ -5642,20 +5617,17 @@ __init int register_ftrace_command(struct ftrace_func_command *cmd)
 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
 {
         struct ftrace_func_command *p, *n;
-        int ret = -ENODEV;
 
-        mutex_lock(&ftrace_cmd_mutex);
+        guard(mutex)(&ftrace_cmd_mutex);
+
         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
                 if (strcmp(cmd->name, p->name) == 0) {
-                        ret = 0;
                         list_del_init(&p->list);
-                        goto out_unlock;
+                        return 0;
                 }
         }
-out_unlock:
-        mutex_unlock(&ftrace_cmd_mutex);
 
-        return ret;
+        return -ENODEV;
 }
 
 static int ftrace_process_regex(struct ftrace_iterator *iter,
@@ -5665,7 +5637,7 @@ static int ftrace_process_regex(struct ftrace_iterator *iter,
         struct trace_array *tr = iter->ops->private;
         char *func, *command, *next = buff;
         struct ftrace_func_command *p;
-        int ret = -EINVAL;
+        int ret;
 
         func = strsep(&next, ":");
 
@@ -5682,17 +5654,14 @@ static int ftrace_process_regex(struct ftrace_iterator *iter,
 
         command = strsep(&next, ":");
 
-        mutex_lock(&ftrace_cmd_mutex);
-        list_for_each_entry(p, &ftrace_commands, list) {
-                if (strcmp(p->name, command) == 0) {
-                        ret = p->func(tr, hash, func, command, next, enable);
-                        goto out_unlock;
-                }
-        }
-out_unlock:
-        mutex_unlock(&ftrace_cmd_mutex);
+        guard(mutex)(&ftrace_cmd_mutex);
 
-        return ret;
+        list_for_each_entry(p, &ftrace_commands, list) {
+                if (strcmp(p->name, command) == 0)
+                        return p->func(tr, hash, func, command, next, enable);
+        }
+
+        return -EINVAL;
 }
 
 static ssize_t
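register_ftrace_command(), unregister_ftrace_command() and ftrace_process_regex() above all end up with the same shape: guard(mutex) held for the whole function, a direct return from inside the list walk on a match, and a final return for the not-found case. A self-contained sketch of that shape under hypothetical names (my_cmd, my_cmds, my_cmd_register):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/string.h>

struct my_cmd {
        const char *name;
        struct list_head list;
};

static LIST_HEAD(my_cmds);
static DEFINE_MUTEX(my_cmds_mutex);

static int my_cmd_register(struct my_cmd *cmd)
{
        struct my_cmd *p;

        guard(mutex)(&my_cmds_mutex);
        list_for_each_entry(p, &my_cmds, list) {
                if (strcmp(cmd->name, p->name) == 0)
                        return -EBUSY;  /* duplicate; mutex dropped automatically */
        }
        list_add(&cmd->list, &my_cmds);
        return 0;
}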
@@ -5726,12 +5695,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
                                            parser->idx, enable);
                 trace_parser_clear(parser);
                 if (ret < 0)
-                        goto out;
+                        return ret;
         }
 
-        ret = read;
-out:
-        return ret;
+        return read;
 }
 
 ssize_t
@@ -8291,7 +8258,7 @@ pid_write(struct file *filp, const char __user *ubuf,
         if (!cnt)
                 return 0;
 
-        mutex_lock(&ftrace_lock);
+        guard(mutex)(&ftrace_lock);
 
         switch (type) {
         case TRACE_PIDS:
@@ -8307,14 +8274,13 @@ pid_write(struct file *filp, const char __user *ubuf,
                                              lockdep_is_held(&ftrace_lock));
                 break;
         default:
-                ret = -EINVAL;
                 WARN_ON_ONCE(1);
-                goto out;
+                return -EINVAL;
         }
 
         ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
         if (ret < 0)
-                goto out;
+                return ret;
 
         switch (type) {
         case TRACE_PIDS:
@@ -8343,11 +8309,8 @@ pid_write(struct file *filp, const char __user *ubuf,
 
         ftrace_update_pid_func();
         ftrace_startup_all(0);
-out:
-        mutex_unlock(&ftrace_lock);
 
-        if (ret > 0)
-                *ppos += ret;
+        *ppos += ret;
 
         return ret;
 }
@@ -8750,17 +8713,17 @@ static int
 ftrace_enable_sysctl(const struct ctl_table *table, int write,
                      void *buffer, size_t *lenp, loff_t *ppos)
 {
-        int ret = -ENODEV;
+        int ret;
 
-        mutex_lock(&ftrace_lock);
+        guard(mutex)(&ftrace_lock);
 
         if (unlikely(ftrace_disabled))
-                goto out;
+                return -ENODEV;
 
         ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
-                goto out;
+                return ret;
 
         if (ftrace_enabled) {
 
@@ -8774,8 +8737,7 @@ ftrace_enable_sysctl(const struct ctl_table *table, int write,
         } else {
                 if (is_permanent_ops_registered()) {
                         ftrace_enabled = true;
-                        ret = -EBUSY;
-                        goto out;
+                        return -EBUSY;
                 }
 
                 /* stopping ftrace calls (just send to ftrace_stub) */
@@ -8785,9 +8747,7 @@ ftrace_enable_sysctl(const struct ctl_table *table, int write,
         }
 
         last_ftrace_enabled = !!ftrace_enabled;
-out:
-        mutex_unlock(&ftrace_lock);
-        return ret;
+        return 0;
 }
 
 static struct ctl_table ftrace_sysctls[] = {
@@ -181,10 +181,9 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
         struct trace_array *tr = gops->private;
         struct trace_array_cpu *data;
         struct fgraph_times *ftimes;
-        unsigned long flags;
         unsigned int trace_ctx;
         long disabled;
-        int ret;
+        int ret = 0;
         int cpu;
 
         if (*task_var & TRACE_GRAPH_NOTRACE)
@@ -235,25 +234,21 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
         if (tracing_thresh)
                 return 1;
 
-        local_irq_save(flags);
+        preempt_disable_notrace();
         cpu = raw_smp_processor_id();
         data = per_cpu_ptr(tr->array_buffer.data, cpu);
-        disabled = atomic_inc_return(&data->disabled);
-        if (likely(disabled == 1)) {
-                trace_ctx = tracing_gen_ctx_flags(flags);
-                if (unlikely(IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
-                             tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR))) {
+        disabled = atomic_read(&data->disabled);
+        if (likely(!disabled)) {
+                trace_ctx = tracing_gen_ctx();
+                if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
+                    tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
                         unsigned long retaddr = ftrace_graph_top_ret_addr(current);
 
                         ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
-                } else
+                } else {
                         ret = __trace_graph_entry(tr, trace, trace_ctx);
-        } else {
-                ret = 0;
+                }
         }
+        preempt_enable_notrace();
 
-        atomic_dec(&data->disabled);
-        local_irq_restore(flags);
-
         return ret;
 }
@@ -320,7 +315,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
         struct trace_array *tr = gops->private;
         struct trace_array_cpu *data;
         struct fgraph_times *ftimes;
-        unsigned long flags;
         unsigned int trace_ctx;
         long disabled;
         int size;
@@ -341,16 +335,15 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 
         trace->calltime = ftimes->calltime;
 
-        local_irq_save(flags);
+        preempt_disable_notrace();
         cpu = raw_smp_processor_id();
         data = per_cpu_ptr(tr->array_buffer.data, cpu);
-        disabled = atomic_inc_return(&data->disabled);
-        if (likely(disabled == 1)) {
-                trace_ctx = tracing_gen_ctx_flags(flags);
+        disabled = atomic_read(&data->disabled);
+        if (likely(!disabled)) {
+                trace_ctx = tracing_gen_ctx();
                 __trace_graph_return(tr, trace, trace_ctx);
         }
-        atomic_dec(&data->disabled);
-        local_irq_restore(flags);
+        preempt_enable_notrace();
 }
 
 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
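In trace_graph_entry() and trace_graph_return() above, the saved-flags irq disabling and the atomic_inc_return()/atomic_dec() pair on the per-CPU disabled counter become a notrace preemption-disabled section that only reads the counter, with tracing_gen_ctx() deriving the context instead of tracing_gen_ctx_flags(). A reduced sketch of the new shape; trace_event_example() is illustrative and omits the actual event recording:

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/preempt.h>

static void trace_event_example(atomic_t *disabled)
{
        /* Keep this CPU stable; interrupts stay enabled. */
        preempt_disable_notrace();

        /* Only read the per-CPU disabled counter, never bump it. */
        if (likely(!atomic_read(disabled))) {
                /* ... derive the trace context and write the event ... */
        }

        preempt_enable_notrace();
}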