Merge branch 'tip/tracing/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace
commit e2b8b28085
@@ -157,7 +157,110 @@ static inline void tracepoint_synchronize_unregister(void)
#define TRACE_FORMAT(name, proto, args, fmt) \
DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))

#define TRACE_EVENT(name, proto, args, struct, print, assign) \

/*
* For use with the TRACE_EVENT macro:
*
* We define a tracepoint, its arguments, its printk format
* and its 'fast binary record' layout.
*
* Firstly, name your tracepoint via TRACE_EVENT(name : the
* 'subsystem_event' notation is fine.
*
* Think about this whole construct as the
* 'trace_sched_switch() function' from now on.
*
*
* TRACE_EVENT(sched_switch,
*
* *
* * The function has a regular function-arguments
* * prototype; declare it via TP_PROTO():
* *
*
* TP_PROTO(struct rq *rq, struct task_struct *prev,
* struct task_struct *next),
*
* *
* * Define the call signature of the 'function'.
* * (Design sidenote: we use this instead of a
* * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.)
* *
*
* TP_ARGS(rq, prev, next),
*
* *
* * Fast binary tracing: define the trace record via
* * TP_STRUCT__entry(). You can think about it like a
* * regular C structure local variable definition.
* *
* * This is how the trace record is structured and will
* * be saved into the ring buffer. These are the fields
* * that will be exposed to user-space in
* * /debug/tracing/events/<*>/format.
* *
* * The declared 'local variable' is called '__entry'
* *
* * __field(pid_t, prev_pid) is equivalent to a standard declaration:
* *
* * pid_t prev_pid;
* *
* * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to:
* *
* * char prev_comm[TASK_COMM_LEN];
* *
*
* TP_STRUCT__entry(
* __array( char, prev_comm, TASK_COMM_LEN )
* __field( pid_t, prev_pid )
* __field( int, prev_prio )
* __array( char, next_comm, TASK_COMM_LEN )
* __field( pid_t, next_pid )
* __field( int, next_prio )
* ),
*
* *
* * Assign the entry into the trace record, by embedding
* * a full C statement block into TP_fast_assign(). You
* * can refer to the trace record as '__entry' -
* * otherwise you can put arbitrary C code in here.
* *
* * Note: this C code will execute every time a trace event
* * happens, on an active tracepoint.
* *
*
* TP_fast_assign(
* memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
* __entry->prev_pid = prev->pid;
* __entry->prev_prio = prev->prio;
* memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
* __entry->next_pid = next->pid;
* __entry->next_prio = next->prio;
* )
*
* *
* * Formatted output of a trace record via TP_printk().
* * This is how the tracepoint will appear under ftrace
* * plugins that make use of this tracepoint.
* *
* * (raw-binary tracing won't actually perform this step.)
* *
*
* TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
* __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
* __entry->next_comm, __entry->next_pid, __entry->next_prio),
*
* );
*
* This macro construct is thus used for the regular printk format
* tracing setup; it is used to construct a function pointer based
* tracepoint callback (this is used by programmatic plugins and
* can also be used by generic instrumentation like SystemTap), and
* it is also used to expose a structured trace record in
* /debug/tracing/events/.
*/

#define TRACE_EVENT(name, proto, args, struct, assign, print) \
DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))

#endif
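For reference, the sched_switch example documented in the comment above, assembled without the comment markers, would read roughly as follows. This is only an illustrative sketch of the layout the comment describes (it assumes the headers providing TRACE_EVENT, TP_PROTO, TP_ARGS, TP_STRUCT__entry, TP_fast_assign and TP_printk are included); it is not itself part of this patch:

TRACE_EVENT(sched_switch,

        TP_PROTO(struct rq *rq, struct task_struct *prev,
                 struct task_struct *next),

        TP_ARGS(rq, prev, next),

        TP_STRUCT__entry(
                __array( char,  prev_comm,      TASK_COMM_LEN   )
                __field( pid_t, prev_pid                        )
                __field( int,   prev_prio                       )
                __array( char,  next_comm,      TASK_COMM_LEN   )
                __field( pid_t, next_pid                        )
                __field( int,   next_prio                       )
        ),

        TP_fast_assign(
                memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
                __entry->prev_pid  = prev->pid;
                __entry->prev_prio = prev->prio;
                memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
                __entry->next_pid  = next->pid;
                __entry->next_prio = next->prio;
        ),

        TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
                __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
);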
@@ -31,13 +31,13 @@ TRACE_EVENT(irq_handler_exit,
__field( int, ret )
),

TP_printk("irq=%d return=%s",
__entry->irq, __entry->ret ? "handled" : "unhandled"),

TP_fast_assign(
__entry->irq = irq;
__entry->ret = ret;
)
),

TP_printk("irq=%d return=%s",
__entry->irq, __entry->ret ? "handled" : "unhandled")
);

#undef TRACE_SYSTEM
@@ -22,12 +22,12 @@ TRACE_EVENT(sched_kthread_stop,
__field( pid_t, pid )
),

TP_printk("task %s:%d", __entry->comm, __entry->pid),

TP_fast_assign(
memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
__entry->pid = t->pid;
)
),

TP_printk("task %s:%d", __entry->comm, __entry->pid)
);

/*
@@ -43,11 +43,11 @@ TRACE_EVENT(sched_kthread_stop_ret,
__field( int, ret )
),

TP_printk("ret %d", __entry->ret),

TP_fast_assign(
__entry->ret = ret;
)
),

TP_printk("ret %d", __entry->ret)
);

/*
@@ -68,14 +68,14 @@ TRACE_EVENT(sched_wait_task,
__field( int, prio )
),

TP_printk("task %s:%d [%d]",
__entry->comm, __entry->pid, __entry->prio),

TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
)
),

TP_printk("task %s:%d [%d]",
__entry->comm, __entry->pid, __entry->prio)
);

/*
@@ -97,16 +97,16 @@ TRACE_EVENT(sched_wakeup,
__field( int, success )
),

TP_printk("task %s:%d [%d] success=%d",
__entry->comm, __entry->pid, __entry->prio,
__entry->success),

TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->success = success;
)
),

TP_printk("task %s:%d [%d] success=%d",
__entry->comm, __entry->pid, __entry->prio,
__entry->success)
);

/*
@@ -128,16 +128,16 @@ TRACE_EVENT(sched_wakeup_new,
__field( int, success )
),

TP_printk("task %s:%d [%d] success=%d",
__entry->comm, __entry->pid, __entry->prio,
__entry->success),

TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->success = success;
)
),

TP_printk("task %s:%d [%d] success=%d",
__entry->comm, __entry->pid, __entry->prio,
__entry->success)
);

/*
@@ -162,10 +162,6 @@ TRACE_EVENT(sched_switch,
__field( int, next_prio )
),

TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
__entry->next_comm, __entry->next_pid, __entry->next_prio),

TP_fast_assign(
memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
__entry->prev_pid = prev->pid;
@@ -173,7 +169,11 @@ TRACE_EVENT(sched_switch,
memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
)
),

TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);

/*
@@ -193,17 +193,17 @@ TRACE_EVENT(sched_migrate_task,
__field( int, dest_cpu )
),

TP_printk("task %s:%d [%d] from: %d to: %d",
__entry->comm, __entry->pid, __entry->prio,
__entry->orig_cpu, __entry->dest_cpu),

TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
__entry->orig_cpu = orig_cpu;
__entry->dest_cpu = dest_cpu;
)
),

TP_printk("task %s:%d [%d] from: %d to: %d",
__entry->comm, __entry->pid, __entry->prio,
__entry->orig_cpu, __entry->dest_cpu)
);

/*
@@ -221,14 +221,14 @@ TRACE_EVENT(sched_process_free,
__field( int, prio )
),

TP_printk("task %s:%d [%d]",
__entry->comm, __entry->pid, __entry->prio),

TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
)
),

TP_printk("task %s:%d [%d]",
__entry->comm, __entry->pid, __entry->prio)
);

/*
@@ -246,14 +246,14 @@ TRACE_EVENT(sched_process_exit,
__field( int, prio )
),

TP_printk("task %s:%d [%d]",
__entry->comm, __entry->pid, __entry->prio),

TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
)
),

TP_printk("task %s:%d [%d]",
__entry->comm, __entry->pid, __entry->prio)
);

/*
@@ -271,14 +271,14 @@ TRACE_EVENT(sched_process_wait,
__field( int, prio )
),

TP_printk("task %s:%d [%d]",
__entry->comm, __entry->pid, __entry->prio),

TP_fast_assign(
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
__entry->pid = pid_nr(pid);
__entry->prio = current->prio;
)
),

TP_printk("task %s:%d [%d]",
__entry->comm, __entry->pid, __entry->prio)
);

/*
@@ -297,16 +297,16 @@ TRACE_EVENT(sched_process_fork,
__field( pid_t, child_pid )
),

TP_printk("parent %s:%d child %s:%d",
__entry->parent_comm, __entry->parent_pid,
__entry->child_comm, __entry->child_pid),

TP_fast_assign(
memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
__entry->parent_pid = parent->pid;
memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
__entry->child_pid = child->pid;
)
),

TP_printk("parent %s:%d child %s:%d",
__entry->parent_comm, __entry->parent_pid,
__entry->child_comm, __entry->child_pid)
);

/*
@@ -324,14 +324,14 @@ TRACE_EVENT(sched_signal_send,
__field( pid_t, pid )
),

TP_printk("sig: %d task %s:%d",
__entry->sig, __entry->comm, __entry->pid),

TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->sig = sig;
)
),

TP_printk("sig: %d task %s:%d",
__entry->sig, __entry->comm, __entry->pid)
);

#undef TRACE_SYSTEM
@@ -33,7 +33,7 @@ static struct trace_array *blk_tr;
static int __read_mostly blk_tracer_enabled;

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_BLK_OPT_CLASSIC 0x1
#define TRACE_BLK_OPT_CLASSIC 0x1

static struct tracer_opt blk_tracer_opts[] = {
/* Default disable the minimalistic output */
@@ -564,7 +564,7 @@ EXPORT_SYMBOL_GPL(blk_trace_startstop);
/**
* blk_trace_ioctl: - handle the ioctls associated with tracing
* @bdev: the block device
* @cmd: the ioctl cmd
* @cmd: the ioctl cmd
* @arg: the argument data, if any
*
**/
@@ -1128,9 +1128,9 @@ static void blk_tracer_reset(struct trace_array *tr)

static struct {
const char *act[2];
int (*print)(struct trace_seq *s, const struct trace_entry *ent);
int (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] __read_mostly = {
[__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic },
[__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic },
[__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic },
[__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic },
[__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic },
@@ -1229,7 +1229,7 @@ static struct tracer blk_tracer __read_mostly = {
};

static struct trace_event trace_blk_event = {
.type = TRACE_BLK,
.type = TRACE_BLK,
.trace = blk_trace_event_print,
.binary = blk_trace_event_print_binary,
};
@@ -799,7 +799,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,

entry->preempt_count = pc & 0xff;
entry->pid = (tsk) ? tsk->pid : 0;
entry->tgid = (tsk) ? tsk->tgid : 0;
entry->tgid = (tsk) ? tsk->tgid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
@@ -157,7 +157,7 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter,

static struct trace_event trace_branch_event = {
.type = TRACE_BRANCH,
.type = TRACE_BRANCH,
.trace = trace_branch_print,
};

@@ -102,7 +102,7 @@ static int ftrace_set_clr_event(char *buf, int set)
mutex_lock(&event_mutex);
events_for_each(call) {

if (!call->name)
if (!call->name || !call->regfunc)
continue;

if (match &&
@@ -207,8 +207,20 @@ t_next(struct seq_file *m, void *v, loff_t *pos)

(*pos)++;

if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
return NULL;
for (;;) {
if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
return NULL;

/*
* The ftrace subsystem is for showing formats only.
* They can not be enabled or disabled via the event files.
*/
if (call->regfunc)
break;

call++;
next = call;
}

m->private = ++next;
@@ -338,8 +350,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,

#undef FIELD
#define FIELD(type, name) \
#type, #name, (unsigned int)offsetof(typeof(field), name), \
(unsigned int)sizeof(field.name)
#type, #name, offsetof(typeof(field), name), sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
@@ -347,11 +358,11 @@ static int trace_write_header(struct trace_seq *s)

/* struct trace_entry */
return trace_seq_printf(s,
"\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
"\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
"\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
"\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
"\tfield:%s %s;\toffset:%u;\tsize:%u;\n"
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
"\n",
FIELD(unsigned char, type),
FIELD(unsigned char, flags),
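The change from %u with (unsigned int) casts to plain %zu works because both offsetof() and sizeof() yield size_t. A minimal userspace sketch of the same pattern (the struct and macro names below are made up for illustration and are not taken from this patch):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical record layout, standing in for struct trace_entry. */
struct demo_entry {
        unsigned char   type;
        unsigned char   flags;
        int             pid;
};

/*
 * Expand to the arguments one "field:...; offset:...; size:...;" line needs,
 * the same way FIELD() feeds trace_seq_printf(): offsetof() and sizeof()
 * both have type size_t, hence the %zu conversions.
 */
#define DEMO_FIELD(type, name)                                  \
        #type, #name, offsetof(struct demo_entry, name),        \
        sizeof(((struct demo_entry *)0)->name)

int main(void)
{
        printf("\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
               DEMO_FIELD(unsigned char, type));
        printf("\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
               DEMO_FIELD(int, pid));
        return 0;
}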
@@ -417,6 +428,13 @@ static const struct seq_operations show_set_event_seq_ops = {
.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
.open = ftrace_event_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
.open = ftrace_event_seq_open,
.read = seq_read,
@@ -558,6 +576,13 @@ static __init int event_trace_init(void)
if (!d_tracer)
return 0;

entry = debugfs_create_file("available_events", 0444, d_tracer,
(void *)&show_event_seq_ops,
&ftrace_avail_fops);
if (!entry)
pr_warning("Could not create debugfs "
"'available_events' entry\n");

entry = debugfs_create_file("set_event", 0644, d_tracer,
(void *)&show_set_event_seq_ops,
&ftrace_set_event_fops);
@@ -6,11 +6,13 @@
* struct ftrace_raw_<call> {
* struct trace_entry ent;
* <type> <item>;
* <type2> <item2>[<len>];
* [...]
* };
*
* The <type> <item> is created by the TRACE_FIELD(type, item, assign)
* macro. We simply do "type item;", and that will create the fields
* The <type> <item> is created by the __field(type, item) macro or
* the __array(type2, item2, len) macro.
* We simply do "type item;", and that will create the fields
* in the structure.
*/

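For the sched_switch example documented in tracepoint.h above, the structure this first stage generates would look roughly like the sketch below (an illustration of what the comment describes, not code contained in this patch):

/*
 * Roughly what TRACE_EVENT(sched_switch, ...) expands to in stage 1:
 * each __field(type, item) becomes "type item;" and each
 * __array(type, item, len) becomes "type item[len];".
 */
struct ftrace_raw_sched_switch {
        struct trace_entry      ent;
        char                    prev_comm[TASK_COMM_LEN];
        pid_t                   prev_pid;
        int                     prev_prio;
        char                    next_comm[TASK_COMM_LEN];
        pid_t                   next_pid;
        int                     next_prio;
};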
@@ -27,7 +29,7 @@
#define TP_STRUCT__entry(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, print, assign) \
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
struct ftrace_raw_##name { \
struct trace_entry ent; \
tstruct \
@@ -20,7 +20,7 @@
*
* field = (typeof(field))entry;
*
* ret = trace_seq_printf(s, <TP_RAW_FMT> "%s", <ARGS> "\n");
* ret = trace_seq_printf(s, <TP_printk> "\n");
* if (!ret)
* return TRACE_TYPE_PARTIAL_LINE;
*
@@ -39,7 +39,7 @@
#define TP_printk(fmt, args...) fmt "\n", args

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, print, assign) \
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
{ \
@@ -76,10 +76,9 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
* int ret;
*
* ret = trace_seq_printf(s, #type " " #item ";"
* " size:%d; offset:%d;\n",
* sizeof(field.type),
* offsetof(struct ftrace_raw_##call,
* item));
* " offset:%u; size:%u;\n",
* offsetof(struct ftrace_raw_##call, item),
* sizeof(field.type));
*
* }
*/
@@ -115,7 +114,7 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
#define TP_fast_assign(args...) args

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, print, func) \
#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
static int \
ftrace_format_##call(struct trace_seq *s) \
{ \
@@ -5,23 +5,23 @@
*
* static void ftrace_event_<call>(proto)
* {
* event_trace_printk(_RET_IP_, "<call>: " <fmt>);
* event_trace_printk(_RET_IP_, "<call>: " <fmt>);
* }
*
* static int ftrace_reg_event_<call>(void)
* {
* int ret;
* int ret;
*
* ret = register_trace_<call>(ftrace_event_<call>);
* if (!ret)
* pr_info("event trace: Could not activate trace point "
* "probe to <call>");
* return ret;
* ret = register_trace_<call>(ftrace_event_<call>);
* if (!ret)
* pr_info("event trace: Could not activate trace point "
* "probe to <call>");
* return ret;
* }
*
* static void ftrace_unreg_event_<call>(void)
* {
* unregister_trace_<call>(ftrace_event_<call>);
* unregister_trace_<call>(ftrace_event_<call>);
* }
*
* For those macros defined with TRACE_FORMAT:
@@ -29,9 +29,9 @@
* static struct ftrace_event_call __used
* __attribute__((__aligned__(4)))
* __attribute__((section("_ftrace_events"))) event_<call> = {
* .name = "<call>",
* .regfunc = ftrace_reg_event_<call>,
* .unregfunc = ftrace_unreg_event_<call>,
* .name = "<call>",
* .regfunc = ftrace_reg_event_<call>,
* .unregfunc = ftrace_unreg_event_<call>,
* }
*
*
@@ -41,66 +41,66 @@
*
* static void ftrace_raw_event_<call>(proto)
* {
* struct ring_buffer_event *event;
* struct ftrace_raw_<call> *entry; <-- defined in stage 1
* unsigned long irq_flags;
* int pc;
* struct ring_buffer_event *event;
* struct ftrace_raw_<call> *entry; <-- defined in stage 1
* unsigned long irq_flags;
* int pc;
*
* local_save_flags(irq_flags);
* pc = preempt_count();
* local_save_flags(irq_flags);
* pc = preempt_count();
*
* event = trace_current_buffer_lock_reserve(event_<call>.id,
* sizeof(struct ftrace_raw_<call>),
* irq_flags, pc);
* if (!event)
* return;
* entry = ring_buffer_event_data(event);
* event = trace_current_buffer_lock_reserve(event_<call>.id,
* sizeof(struct ftrace_raw_<call>),
* irq_flags, pc);
* if (!event)
* return;
* entry = ring_buffer_event_data(event);
*
* <tstruct>; <-- Here we assign the entries by the TRACE_FIELD.
* <assign>; <-- Here we assign the entries by the __field and
* __array macros.
*
* trace_current_buffer_unlock_commit(event, irq_flags, pc);
* trace_current_buffer_unlock_commit(event, irq_flags, pc);
* }
*
* static int ftrace_raw_reg_event_<call>(void)
* {
* int ret;
* int ret;
*
* ret = register_trace_<call>(ftrace_raw_event_<call>);
* if (!ret)
* pr_info("event trace: Could not activate trace point "
* "probe to <call>");
* return ret;
* ret = register_trace_<call>(ftrace_raw_event_<call>);
* if (!ret)
* pr_info("event trace: Could not activate trace point "
* "probe to <call>");
* return ret;
* }
*
* static void ftrace_unreg_event_<call>(void)
* {
* unregister_trace_<call>(ftrace_raw_event_<call>);
* unregister_trace_<call>(ftrace_raw_event_<call>);
* }
*
* static struct trace_event ftrace_event_type_<call> = {
* .trace = ftrace_raw_output_<call>, <-- stage 2
* .trace = ftrace_raw_output_<call>, <-- stage 2
* };
*
* static int ftrace_raw_init_event_<call>(void)
* {
* int id;
* int id;
*
* id = register_ftrace_event(&ftrace_event_type_<call>);
* if (!id)
* return -ENODEV;
* event_<call>.id = id;
* return 0;
* id = register_ftrace_event(&ftrace_event_type_<call>);
* if (!id)
* return -ENODEV;
* event_<call>.id = id;
* return 0;
* }
*
* static struct ftrace_event_call __used
* __attribute__((__aligned__(4)))
* __attribute__((section("_ftrace_events"))) event_<call> = {
* .name = "<call>",
* .regfunc = ftrace_reg_event_<call>,
* .unregfunc = ftrace_unreg_event_<call>,
* .raw_init = ftrace_raw_init_event_<call>,
* .raw_reg = ftrace_raw_reg_event_<call>,
* .raw_unreg = ftrace_raw_unreg_event_<call>,
* .name = "<call>",
* .system = "<system>",
* .raw_init = ftrace_raw_init_event_<call>,
* .regfunc = ftrace_reg_event_<call>,
* .unregfunc = ftrace_unreg_event_<call>,
* .show_format = ftrace_format_<call>,
* }
*
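The event_<call> structures described above all land in the "_ftrace_events" ELF section, which is what lets events_for_each() and t_next() walk every event between __start_ftrace_events and __stop_ftrace_events without an explicit registration list. A hedged userspace sketch of the same linker-section trick (all names here are illustrative, not kernel code; build with GCC or clang on an ELF platform, where __start_/__stop_ symbols are emitted for sections whose names are valid C identifiers):

#include <stdio.h>

struct demo_event_call {
        const char *name;
        const char *system;
};

/* Drop one descriptor into the "demo_events" section, like event_##call. */
#define DEMO_EVENT(call, sys)                                           \
        static struct demo_event_call __attribute__((__aligned__(4)))   \
        __attribute__((section("demo_events"), used))                   \
        event_##call = { .name = #call, .system = sys }

DEMO_EVENT(sched_switch_demo, "sched");
DEMO_EVENT(irq_handler_exit_demo, "irq");

/* Linker-provided section bounds, like __start/__stop_ftrace_events. */
extern struct demo_event_call __start_demo_events[];
extern struct demo_event_call __stop_demo_events[];

int main(void)
{
        struct demo_event_call *call;

        /* Walk the section the way events_for_each()/t_next() do. */
        for (call = __start_demo_events; call < __stop_demo_events; call++)
                printf("%s/%s\n", call->system, call->name);
        return 0;
}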
@@ -138,7 +138,7 @@ _TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
.name = #call, \
.system = __stringify(TRACE_SYSTEM), \
.regfunc = ftrace_reg_event_##call, \
.unregfunc = ftrace_unreg_event_##call, \
@@ -148,7 +148,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
#define __entry entry

#undef TRACE_EVENT
#define TRACE_EVENT(call, proto, args, tstruct, print, assign) \
#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
\
static struct ftrace_event_call event_##call; \
\
@@ -163,7 +163,7 @@ static void ftrace_raw_event_##call(proto) \
pc = preempt_count(); \
\
event = trace_current_buffer_lock_reserve(event_##call.id, \
sizeof(struct ftrace_raw_##call), \
sizeof(struct ftrace_raw_##call), \
irq_flags, pc); \
if (!event) \
return; \
@@ -208,7 +208,7 @@ static int ftrace_raw_init_event_##call(void) \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
.name = #call, \
.system = __stringify(TRACE_SYSTEM), \
.raw_init = ftrace_raw_init_event_##call, \
.regfunc = ftrace_raw_reg_event_##call, \
@@ -94,7 +94,7 @@ ftrace_format_##call(struct trace_seq *s) \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
.name = #call, \
.name = #call, \
.id = proto, \
.system = __stringify(TRACE_SYSTEM), \
.show_format = ftrace_format_##call, \
@@ -841,12 +841,12 @@ static void graph_trace_close(struct trace_iterator *iter)
}

static struct tracer graph_trace __read_mostly = {
.name = "function_graph",
.name = "function_graph",
.open = graph_trace_open,
.close = graph_trace_close,
.wait_pipe = poll_wait_pipe,
.init = graph_trace_init,
.reset = graph_trace_reset,
.init = graph_trace_init,
.reset = graph_trace_reset,
.print_line = print_graph_function,
.print_header = print_graph_headers,
.flags = &tracer_flags,
@@ -565,7 +565,7 @@ static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
}

static struct trace_event trace_fn_event = {
.type = TRACE_FN,
.type = TRACE_FN,
.trace = trace_fn_trace,
.raw = trace_fn_raw,
.hex = trace_fn_hex,
@@ -696,7 +696,7 @@ static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
}

static struct trace_event trace_ctx_event = {
.type = TRACE_CTX,
.type = TRACE_CTX,
.trace = trace_ctx_print,
.raw = trace_ctx_raw,
.hex = trace_ctx_hex,
@@ -704,7 +704,7 @@ static struct trace_event trace_ctx_event = {
};

static struct trace_event trace_wake_event = {
.type = TRACE_WAKE,
.type = TRACE_WAKE,
.trace = trace_wake_print,
.raw = trace_wake_raw,
.hex = trace_wake_hex,
@@ -759,7 +759,7 @@ static enum print_line_t trace_special_bin(struct trace_iterator *iter,
}

static struct trace_event trace_special_event = {
.type = TRACE_SPECIAL,
.type = TRACE_SPECIAL,
.trace = trace_special_print,
.raw = trace_special_print,
.hex = trace_special_hex,
@@ -796,7 +796,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter,
}

static struct trace_event trace_stack_event = {
.type = TRACE_STACK,
.type = TRACE_STACK,
.trace = trace_stack_print,
.raw = trace_special_print,
.hex = trace_special_hex,
@@ -825,7 +825,7 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
}

static struct trace_event trace_user_stack_event = {
.type = TRACE_USER_STACK,
.type = TRACE_USER_STACK,
.trace = trace_user_stack_print,
.raw = trace_special_print,
.hex = trace_special_hex,
@@ -879,7 +879,7 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)

static struct trace_event trace_print_event = {
.type = TRACE_PRINT,
.type = TRACE_PRINT,
.trace = trace_print_print,
.raw = trace_print_raw,
};
@@ -19,14 +19,14 @@ struct cpu_workqueue_stats {
/* Useful to know if we print the cpu headers */
bool first_entry;
int cpu;
pid_t pid;
pid_t pid;
/* Can be inserted from interrupt or user context, need to be atomic */
atomic_t inserted;
atomic_t inserted;
/*
* Don't need to be atomic, works are serialized in a single workqueue thread
* on a single CPU.
*/
unsigned int executed;
unsigned int executed;
};

/* List of workqueue threads on one cpu */
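The two comments above capture a common pattern: a counter that can be bumped from interrupt context needs an atomic type, while a counter touched only by one serialized worker thread can stay a plain integer. A small userspace analogue using C11 atomics (purely illustrative; the names below are not from the kernel sources):

#include <stdatomic.h>
#include <stdio.h>

/* Userspace stand-in for the two counters in cpu_workqueue_stats. */
struct demo_stats {
        atomic_uint     inserted;       /* may be bumped from any context */
        unsigned int    executed;       /* only touched by one worker thread */
};

/* May run concurrently with the worker, so use an atomic increment. */
static void demo_insert(struct demo_stats *s)
{
        atomic_fetch_add(&s->inserted, 1);
}

/* Runs only in the single worker thread; a plain increment is enough. */
static void demo_execute(struct demo_stats *s)
{
        s->executed++;
}

int main(void)
{
        struct demo_stats s = { 0 };    /* zero-initialize both counters */

        demo_insert(&s);
        demo_execute(&s);
        printf("inserted=%u executed=%u\n",
               atomic_load(&s.inserted), s.executed);
        return 0;
}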