mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-14 17:53:39 +00:00

Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace.git

This commit is contained in: commit 9977cf38a8
@@ -658,7 +658,6 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long sp = arch_ftrace_regs(fregs)->regs.gpr[1];
	int bit;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;
@@ -666,14 +665,9 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		goto out;

	if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
		parent_ip = ppc_function_entry(return_to_handler);

	ftrace_test_recursion_unlock(bit);
out:
	arch_ftrace_regs(fregs)->regs.link = parent_ip;
}
@@ -790,7 +790,6 @@ static unsigned long
__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
	unsigned long return_hooker;
	int bit;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;
@@ -798,16 +797,11 @@ __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	bit = ftrace_test_recursion_trylock(ip, parent);
	if (bit < 0)
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
		parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
out:
	return parent;
}
@@ -615,7 +615,6 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int bit;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
@@ -635,14 +634,8 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	bit = ftrace_test_recursion_trylock(ip, *parent);
	if (bit < 0)
		return;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
@@ -75,6 +75,7 @@

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/cleanup.h>

extern bool static_key_initialized;

@@ -347,6 +348,8 @@ static inline void static_key_disable(struct static_key *key)

#endif /* CONFIG_JUMP_LABEL */

DEFINE_LOCK_GUARD_0(jump_label_lock, jump_label_lock(), jump_label_unlock())

#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled
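The new DEFINE_LOCK_GUARD_0() line generates a zero-argument lock guard class from <linux/cleanup.h>, which is why later hunks can simply write guard(jump_label_lock)(). A minimal sketch of the resulting calling convention (the function and its body here are hypothetical, not part of the patch):

static int example_with_jump_label_lock(void)
{
	guard(jump_label_lock)();	/* jump_label_unlock() runs at every exit */

	if (!static_key_initialized)
		return -EAGAIN;		/* early return: the guard still unlocks */

	/* ... work that must hold the jump label mutex ... */
	return 0;
}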
@@ -4,6 +4,7 @@

#include <linux/args.h>
#include <linux/array_size.h>
#include <linux/cleanup.h>	/* for DEFINE_FREE() */
#include <linux/compiler.h>	/* for inline */
#include <linux/types.h>	/* for size_t */
#include <linux/stddef.h>	/* for NULL */
@@ -312,6 +313,8 @@ extern void *kmemdup_array(const void *src, size_t count, size_t element_size, g
extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
extern void argv_free(char **argv);

DEFINE_FREE(argv_free, char **, if (!IS_ERR_OR_NULL(_T)) argv_free(_T))

/* lib/cmdline.c */
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
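The DEFINE_FREE(argv_free, ...) declaration enables scope-based cleanup for split argument vectors. A sketch of the intended usage (the wrapper function is hypothetical):

static int example_count_args(const char *cmdline)
{
	int argc;
	char **argv __free(argv_free) = argv_split(GFP_KERNEL, cmdline, &argc);

	if (!argv)
		return -ENOMEM;		/* cleanup tolerates NULL via IS_ERR_OR_NULL() */

	return argc;			/* argv_free(argv) runs automatically here */
}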
@@ -673,6 +673,20 @@ struct trace_event_file {
	atomic_t		tm_ref;	/* trigger-mode reference counter */
};

#ifdef CONFIG_HIST_TRIGGERS
extern struct irq_work hist_poll_work;
extern wait_queue_head_t hist_poll_wq;

static inline void hist_poll_wakeup(void)
{
	if (wq_has_sleeper(&hist_poll_wq))
		irq_work_queue(&hist_poll_work);
}

#define hist_poll_wait(file, wait)	\
	poll_wait(file, &hist_poll_wq, wait)
#endif

#define __TRACE_EVENT_FLAGS(name, value)				\
	static int __init trace_init_flags_##name(void)			\
	{								\
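hist_poll_wakeup() defers the wake-up through an irq_work so it is safe from event-recording context, while hist_poll_wait() is what a file's ->poll() callback uses to sleep on the shared queue. An illustrative consumer; the callback and its readiness helper are hypothetical:

static __poll_t example_hist_poll(struct file *file, poll_table *wait)
{
	hist_poll_wait(file, wait);		/* register on hist_poll_wq */

	if (example_hist_has_new_data(file))	/* assumed helper */
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}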
@@ -218,7 +218,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
#define __DEFINE_RUST_DO_TRACE(name, proto, args)	\
	notrace void rust_do_trace_##name(proto)	\
	{						\
		__rust_do_trace_##name(args);		\
		__do_trace_##name(args);		\
	}

/*
@@ -268,7 +268,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)

#define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
	static inline void __rust_do_trace_##name(proto)		\
	static inline void __do_trace_##name(proto)			\
	{								\
		if (cond) {						\
			guard(preempt_notrace)();			\
@@ -277,12 +277,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
	}								\
	static inline void trace_##name(proto)				\
	{								\
		if (static_branch_unlikely(&__tracepoint_##name.key)) {	\
			if (cond) {					\
				guard(preempt_notrace)();		\
				__DO_TRACE_CALL(name, TP_ARGS(args));	\
			}						\
		}							\
		if (static_branch_unlikely(&__tracepoint_##name.key))	\
			__do_trace_##name(args);			\
		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {		\
			WARN_ONCE(!rcu_is_watching(),			\
				  "RCU not watching for tracepoint");	\
@@ -291,7 +287,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)

#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto)		\
	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
	static inline void __rust_do_trace_##name(proto)		\
	static inline void __do_trace_##name(proto)			\
	{								\
		guard(rcu_tasks_trace)();				\
		__DO_TRACE_CALL(name, TP_ARGS(args));			\
@@ -299,10 +295,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
	static inline void trace_##name(proto)				\
	{								\
		might_fault();						\
		if (static_branch_unlikely(&__tracepoint_##name.key)) {	\
			guard(rcu_tasks_trace)();			\
			__DO_TRACE_CALL(name, TP_ARGS(args));		\
		}							\
		if (static_branch_unlikely(&__tracepoint_##name.key))	\
			__do_trace_##name(args);			\
		if (IS_ENABLED(CONFIG_LOCKDEP)) {			\
			WARN_ONCE(!rcu_is_watching(),			\
				  "RCU not watching for tracepoint");	\
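The rework above factors the tracepoint body out of the inline fast path: trace_##name() now only tests the static key and calls a common __do_trace_##name() helper that holds the guard and evaluates the condition. Hand-expanding the macros for a hypothetical tracepoint 'foo' with one int argument gives roughly:

static inline void __do_trace_foo(int arg)
{
	if (1) {				/* 'cond' of an unconditional tracepoint */
		guard(preempt_notrace)();
		__DO_TRACE_CALL(foo, arg);
	}
}

static inline void trace_foo(int arg)
{
	if (static_branch_unlikely(&__tracepoint_foo.key))
		__do_trace_foo(arg);
}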
590	kernel/kprobes.c
@@ -39,6 +39,7 @@
#include <linux/static_call.h>
#include <linux/perf_event.h>
#include <linux/execmem.h>
#include <linux/cleanup.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
@@ -140,45 +141,39 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c);
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	/* Since the slot array is not protected by rcu, we need a mutex */
	mutex_lock(&c->mutex);
retry:
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
	guard(mutex)(&c->mutex);
	do {
		guard(rcu)();
		list_for_each_entry_rcu(kip, &c->pages, list) {
			if (kip->nused < slots_per_page(c)) {
				int i;

			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					rcu_read_unlock();
					goto out;
				for (i = 0; i < slots_per_page(c); i++) {
					if (kip->slot_used[i] == SLOT_CLEAN) {
						kip->slot_used[i] = SLOT_USED;
						kip->nused++;
						return kip->insns + (i * c->insn_size);
					}
				}
				/* kip->nused is broken. Fix it. */
				kip->nused = slots_per_page(c);
				WARN_ON(1);
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}
	rcu_read_unlock();

	/* If there are any garbage slots, collect it and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;
	} while (c->nr_garbage && collect_garbage_slots(c) == 0);

	/* All out of space. Need to allocate a new page. */
	kip = kmalloc(struct_size(kip, slot_used, slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;
		return NULL;

	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
@@ -187,14 +182,12 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
	kip->ngarbage = 0;
	kip->cache = c;
	list_add_rcu(&kip->list, &c->pages);
	slot = kip->insns;

	/* Record the perf ksymbol register event after adding the page */
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
			   PAGE_SIZE, false, c->sym);
out:
	mutex_unlock(&c->mutex);
	return slot;

	return kip->insns;
}

/* Return true if all garbages are collected, otherwise false. */
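The conversion above scopes the RCU read lock to each pass of a do/while loop, so it is dropped before collect_garbage_slots() can sleep, while the mutex guard covers the whole function. The shape of that pattern, as a sketch with hypothetical types and helpers:

static void *example_alloc_slot(struct example_cache *c)
{
	guard(mutex)(&c->mutex);		/* held until the function returns */

	do {
		guard(rcu)();			/* dropped at the end of each pass */

		void *slot = example_find_free_slot(c);	/* assumed helper */
		if (slot)
			return slot;		/* releases the RCU guard, then the mutex */
	} while (example_reclaim(c) == 0);	/* may sleep: RCU no longer held */

	return NULL;
}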
@@ -249,25 +242,35 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c)
	return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
static long __find_insn_page(struct kprobe_insn_cache *c,
			     kprobe_opcode_t *slot, struct kprobe_insn_page **pkip)
{
	struct kprobe_insn_page *kip;
	struct kprobe_insn_page *kip = NULL;
	long idx;

	mutex_lock(&c->mutex);
	rcu_read_lock();
	guard(rcu)();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c))
			goto out;
		if (idx >= 0 && idx < slots_per_page(c)) {
			*pkip = kip;
			return idx;
		}
	}
	/* Could not find this slot. */
	WARN_ON(1);
	kip = NULL;
out:
	rcu_read_unlock();
	*pkip = NULL;
	return -1;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip = NULL;
	long idx;

	guard(mutex)(&c->mutex);
	idx = __find_insn_page(c, slot, &kip);
	/* Mark and sweep: this may sleep */
	if (kip) {
		/* Check double free */
@@ -281,7 +284,6 @@ out:
		collect_one_slot(kip, idx);
	}
}
	mutex_unlock(&c->mutex);
}

/*
@@ -600,47 +602,43 @@ static void kick_kprobe_optimizer(void)
/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	mutex_lock(&text_mutex);
	guard(mutex)(&kprobe_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for quiescence period.
	 */
	do_unoptimize_kprobes();
	scoped_guard(cpus_read_lock) {
		guard(mutex)(&text_mutex);

	/*
	 * Step 2: Wait for quiescence period to ensure all potentially
	 * preempted tasks to have normally scheduled. Because optprobe
	 * may modify multiple instructions, there is a chance that Nth
	 * instruction is preempted. In that case, such tasks can return
	 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
	 * Note that on non-preemptive kernel, this is transparently converted
	 * to synchronize_sched() to wait for all interrupts to have completed.
	 */
	synchronize_rcu_tasks();
		/*
		 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
		 * kprobes before waiting for quiescence period.
		 */
		do_unoptimize_kprobes();

	/* Step 3: Optimize kprobes after quiescence period */
	do_optimize_kprobes();
		/*
		 * Step 2: Wait for quiescence period to ensure all potentially
		 * preempted tasks to have normally scheduled. Because optprobe
		 * may modify multiple instructions, there is a chance that Nth
		 * instruction is preempted. In that case, such tasks can return
		 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
		 * Note that on non-preemptive kernel, this is transparently converted
		 * to synchronize_sched() to wait for all interrupts to have completed.
		 */
		synchronize_rcu_tasks();

	/* Step 4: Free cleaned kprobes after quiescence period */
	do_free_cleaned_kprobes();
		/* Step 3: Optimize kprobes after quiescence period */
		do_optimize_kprobes();

	mutex_unlock(&text_mutex);
	cpus_read_unlock();
		/* Step 4: Free cleaned kprobes after quiescence period */
		do_free_cleaned_kprobes();
	}

	/* Step 5: Kick optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();

	mutex_unlock(&kprobe_mutex);
}

/* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
static void wait_for_kprobe_optimizer_locked(void)
{
	mutex_lock(&kprobe_mutex);
	lockdep_assert_held(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);
@@ -652,8 +650,14 @@ void wait_for_kprobe_optimizer(void)

		mutex_lock(&kprobe_mutex);
	}
}

	mutex_unlock(&kprobe_mutex);
/* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
{
	guard(mutex)(&kprobe_mutex);

	wait_for_kprobe_optimizer_locked();
}

bool optprobe_queued_unopt(struct optimized_kprobe *op)
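Two idioms carry these hunks: scoped_guard() confines a lock to an explicit block inside a function that already holds an outer guard, and the new _locked variant lets callers that own kprobe_mutex reuse the wait loop. A condensed sketch of the locking shape; every name prefixed example_ is hypothetical:

static DEFINE_MUTEX(example_mutex);
static DEFINE_MUTEX(example_text_mutex);

static void example_optimizer_work(void)
{
	guard(mutex)(&example_mutex);		/* outer lock, whole function */

	scoped_guard(cpus_read_lock) {
		guard(mutex)(&example_text_mutex);
		example_patch_text();		/* assumed helper */
	}					/* text mutex and cpus lock end here */

	example_kick_again_if_needed();		/* assumed helper, outer lock only */
}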
@@ -852,29 +856,24 @@ static void try_to_optimize_kprobe(struct kprobe *p)
		return;

	/* For preparing optimization, jump_label_text_reserved() is called. */
	cpus_read_lock();
	jump_label_lock();
	mutex_lock(&text_mutex);
	guard(cpus_read_lock)();
	guard(jump_label_lock)();
	guard(mutex)(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;
		return;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If failed to setup optimizing, fallback to kprobe. */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
		return;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks optimizer thread. */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();
}

static void optimize_all_kprobes(void)
@@ -883,10 +882,10 @@
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	guard(mutex)(&kprobe_mutex);
	/* If optimization is already allowed, just return. */
	if (kprobes_allow_optimization)
		goto out;
		return;

	cpus_read_lock();
	kprobes_allow_optimization = true;
@@ -898,8 +897,6 @@ static void optimize_all_kprobes(void)
	}
	cpus_read_unlock();
	pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
out:
	mutex_unlock(&kprobe_mutex);
}

#ifdef CONFIG_SYSCTL
@@ -909,12 +906,10 @@ static void unoptimize_all_kprobes(void)
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	guard(mutex)(&kprobe_mutex);
	/* If optimization is already prohibited, just return. */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
	if (!kprobes_allow_optimization)
		return;
	}

	cpus_read_lock();
	kprobes_allow_optimization = false;
@@ -926,10 +921,8 @@ static void unoptimize_all_kprobes(void)
		}
	}
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion. */
	wait_for_kprobe_optimizer();
	wait_for_kprobe_optimizer_locked();
	pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
}

@@ -941,7 +934,7 @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table,
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	guard(mutex)(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

@@ -949,7 +942,6 @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table,
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
@@ -1024,7 +1016,8 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
#define __arm_kprobe(p)			arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)		arch_disarm_kprobe(p)
#define kprobe_disarmed(p)		kprobe_disabled(p)
#define wait_for_kprobe_optimizer()	do {} while (0)
#define wait_for_kprobe_optimizer_locked()	\
	lockdep_assert_held(&kprobe_mutex)

static int reuse_unused_kprobe(struct kprobe *ap)
{
@@ -1078,20 +1071,18 @@ static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,

	if (*cnt == 0) {
		ret = register_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
			goto err_ftrace;
		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) {
			/*
			 * At this point, since ops is not registered, we should be safe from
			 * registering empty filter.
			 */
			ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
			return ret;
		}
	}

	(*cnt)++;
	return ret;

err_ftrace:
	/*
	 * At this point, since ops is not registered, we should be safe from
	 * registering empty filter.
	 */
	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	return ret;
}

static int arm_kprobe_ftrace(struct kprobe *p)
@@ -1163,12 +1154,9 @@ static int arm_kprobe(struct kprobe *kp)
	if (unlikely(kprobe_ftrace(kp)))
		return arm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	guard(cpus_read_lock)();
	guard(mutex)(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

@@ -1177,12 +1165,9 @@ static int disarm_kprobe(struct kprobe *kp, bool reopt)
	if (unlikely(kprobe_ftrace(kp)))
		return disarm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	guard(cpus_read_lock)();
	guard(mutex)(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}
@@ -1299,63 +1284,56 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
	int ret = 0;
	struct kprobe *ap = orig_p;

	cpus_read_lock();
	scoped_guard(cpus_read_lock) {
		/* For preparing optimization, jump_label_text_reserved() is called */
		guard(jump_label_lock)();
		guard(mutex)(&text_mutex);

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		if (!kprobe_aggrprobe(orig_p)) {
			/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
			ap = alloc_aggr_kprobe(orig_p);
			if (!ap)
				return -ENOMEM;
			init_aggr_kprobe(ap, orig_p);
		} else if (kprobe_unused(ap)) {
			/* This probe is going to die. Rescue it */
			ret = reuse_unused_kprobe(ap);
			if (ret)
				return ret;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap)) {
		/* This probe is going to die. Rescue it */
		ret = reuse_unused_kprobe(ap);
		if (ret)
			goto out;
	}

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert new probe at the same location that
		 * had a probe in the module vaddr area which already
		 * freed. So, the instruction slot has already been
		 * released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
		if (kprobe_gone(ap)) {
			/*
			 * Even if fail to allocate new slot, don't need to
			 * free the 'ap'. It will be used next time, or
			 * freed by unregister_kprobe().
			 * Attempting to insert new probe at the same location that
			 * had a probe in the module vaddr area which already
			 * freed. So, the instruction slot has already been
			 * released. We need a new slot for the new probe.
			 */
			goto out;
			ret = arch_prepare_kprobe(ap);
			if (ret)
				/*
				 * Even if fail to allocate new slot, don't need to
				 * free the 'ap'. It will be used next time, or
				 * freed by unregister_kprobe().
				 */
				return ret;

	/* Prepare optimized instructions if possible. */
	prepare_optimized_kprobe(ap);
			/* Prepare optimized instructions if possible. */
			prepare_optimized_kprobe(ap);

	/*
	 * Clear gone flag to prevent allocating new slot again, and
	 * set disabled flag because it is not armed yet.
	 */
	ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
		    | KPROBE_FLAG_DISABLED;
			/*
			 * Clear gone flag to prevent allocating new slot again, and
			 * set disabled flag because it is not armed yet.
			 */
			ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
				    | KPROBE_FLAG_DISABLED;
		}

		/* Copy the insn slot of 'p' to 'ap'. */
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}

	/* Copy the insn slot of 'p' to 'ap'. */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed) {
@@ -1448,7 +1426,7 @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
	     unsigned long offset, bool *on_func_entry)
{
	if ((symbol_name && addr) || (!symbol_name && !addr))
		goto invalid;
		return ERR_PTR(-EINVAL);

	if (symbol_name) {
		/*
@@ -1478,16 +1456,16 @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
	 * at the start of the function.
	 */
	addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
	if (addr)
		return addr;
	if (!addr)
		return ERR_PTR(-EINVAL);

invalid:
	return ERR_PTR(-EINVAL);
	return addr;
}

static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	bool on_func_entry;

	return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
}
@@ -1505,15 +1483,15 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p)
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
	if (p == ap)
		return ap;

	list_for_each_entry(list_p, &ap->list, list)
		if (list_p == p)
			/* kprobe p is a valid probe */
			return ap;

	return NULL;
}

/*
@@ -1522,14 +1500,12 @@ valid:
 */
static inline int warn_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;
	guard(mutex)(&kprobe_mutex);

	mutex_lock(&kprobe_mutex);
	if (WARN_ON_ONCE(__get_valid_kprobe(p)))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);
		return -EINVAL;

	return ret;
	return 0;
}

static int check_ftrace_location(struct kprobe *p)
@@ -1565,17 +1541,23 @@ static int check_kprobe_address_safe(struct kprobe *p,
	ret = check_ftrace_location(p);
	if (ret)
		return ret;
	jump_label_lock();
	preempt_disable();

	guard(jump_label_lock)();

	/* Ensure the address is in a text area, and find a module if exists. */
	*probed_mod = NULL;
	if (!core_kernel_text((unsigned long) p->addr)) {
		guard(preempt)();
		*probed_mod = __module_text_address((unsigned long) p->addr);
		if (!(*probed_mod)) {
			ret = -EINVAL;
			goto out;
		}
		if (!(*probed_mod))
			return -EINVAL;

		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod)))
			return -ENOENT;
	}
	/* Ensure it is not in reserved area. */
	if (in_gate_area_no_mm((unsigned long) p->addr) ||
@@ -1584,21 +1566,12 @@ static int check_kprobe_address_safe(struct kprobe *p,
	    static_call_text_reserved(p->addr, p->addr) ||
	    find_bug((unsigned long)p->addr) ||
	    is_cfi_preamble_symbol((unsigned long)p->addr)) {
		ret = -EINVAL;
		goto out;
		module_put(*probed_mod);
		return -EINVAL;
	}

	/* Get module refcount and reject __init functions for loaded modules. */
	if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed '.init.text', we couldn't insert
		 * kprobes in there.
@@ -1606,27 +1579,58 @@ static int check_kprobe_address_safe(struct kprobe *p,
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    !module_is_coming(*probed_mod)) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
			return -ENOENT;
		}
	}

out:
	preempt_enable();
	jump_label_unlock();
	return 0;
}

	return ret;
static int __register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;

	guard(mutex)(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p)
		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
		return register_aggr_kprobe(old_p, p);

	scoped_guard(cpus_read_lock) {
		/* Prevent text modification */
		guard(mutex)(&text_mutex);
		ret = prepare_kprobe(p);
		if (ret)
			return ret;
	}

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
		ret = arm_kprobe(p);
		if (ret) {
			hlist_del_rcu(&p->hlist);
			synchronize_rcu();
		}
	}

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);
	return 0;
}

int register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;
	bool on_func_entry;

	/* Adjust probe address from symbol */
	/* Canonicalize probe address from symbol */
	addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
@@ -1638,6 +1642,8 @@ int register_kprobe(struct kprobe *p)

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	if (on_func_entry)
		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
@@ -1645,44 +1651,7 @@ int register_kprobe(struct kprobe *p)
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	if (on_func_entry)
		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	cpus_read_lock();
	/* Prevent text modification */
	mutex_lock(&text_mutex);
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
		ret = arm_kprobe(p);
		if (ret) {
			hlist_del_rcu(&p->hlist);
			synchronize_rcu();
			goto out;
		}
	}

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	ret = __register_kprobe(p);

	if (probed_mod)
		module_put(probed_mod);
@@ -1761,29 +1730,31 @@ static int __unregister_kprobe_top(struct kprobe *p)
	if (IS_ERR(ap))
		return PTR_ERR(ap);

	if (ap == p)
		/*
		 * This probe is an independent (and non-optimized) kprobe
		 * (not an aggrprobe). Remove from the hash list.
		 */
		goto disarmed;
	WARN_ON(ap != p && !kprobe_aggrprobe(ap));

	/* Following process expects this probe is an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
	/*
	 * If the probe is an independent (and non-optimized) kprobe
	 * (not an aggrprobe), the last kprobe on the aggrprobe, or
	 * kprobe is already disarmed, just remove from the hash list.
	 */
	if (ap == p ||
	    (list_is_singular(&ap->list) && kprobe_disarmed(ap))) {
		/*
		 * !disarmed could happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If disabling probe has special handlers, update aggrprobe */
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
		hlist_del_rcu(&ap->hlist);
		return 0;
	}

	/* If disabling probe has special handlers, update aggrprobe */
	if (p->post_handler && !kprobe_gone(p)) {
		list_for_each_entry(list_p, &ap->list, list) {
			if ((list_p != p) && (list_p->post_handler))
				break;
		}
		/* No other probe has post_handler */
		if (list_entry_is_head(list_p, &ap->list, list)) {
			/*
			 * For the kprobe-on-ftrace case, we keep the
			 * post_handler setting to identify this aggrprobe
@@ -1792,24 +1763,21 @@ static int __unregister_kprobe_top(struct kprobe *p)
			 */
			if (!kprobe_ftrace(ap))
				ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}

	/*
	 * Remove from the aggrprobe: this path will do nothing in
	 * __unregister_kprobe_bottom().
	 */
	list_del_rcu(&p->list);
	if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
		/*
		 * Try to optimize this probe again, because post
		 * handler may have been changed.
		 */
		optimize_kprobe(ap);
	return 0;

disarmed:
	hlist_del_rcu(&ap->hlist);
	return 0;
}

static void __unregister_kprobe_bottom(struct kprobe *p)
@@ -1858,12 +1826,11 @@ void unregister_kprobes(struct kprobe **kps, int num)

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	scoped_guard(mutex, &kprobe_mutex) {
		for (i = 0; i < num; i++)
			if (__unregister_kprobe_top(kps[i]) < 0)
				kps[i]->addr = NULL;
	}
	synchronize_rcu();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
@@ -2302,8 +2269,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num)

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++) {
	guard(mutex)(&kprobe_mutex);

		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
@@ -2312,7 +2280,6 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
		rcu_assign_pointer(rps[i]->rph->rp, NULL);
#endif
	}
	mutex_unlock(&kprobe_mutex);

	synchronize_rcu();
	for (i = 0; i < num; i++) {
@@ -2393,18 +2360,14 @@ static void kill_kprobe(struct kprobe *p)
/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);
	guard(mutex)(&kprobe_mutex);

	/* Disable this kprobe */
	p = __disable_kprobe(kp);
	if (IS_ERR(p))
		ret = PTR_ERR(p);

	mutex_unlock(&kprobe_mutex);
	return ret;
	return IS_ERR(p) ? PTR_ERR(p) : 0;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

@@ -2414,20 +2377,16 @@ int enable_kprobe(struct kprobe *kp)
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);
	guard(mutex)(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}
	if (unlikely(p == NULL))
		return -EINVAL;

	if (kprobe_gone(kp)) {
	if (kprobe_gone(kp))
		/* This kprobe has gone, we couldn't enable it. */
		ret = -EINVAL;
		goto out;
	}
		return -EINVAL;

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
@@ -2441,8 +2400,6 @@ int enable_kprobe(struct kprobe *kp)
			kp->flags |= KPROBE_FLAG_DISABLED;
		}
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
@@ -2630,11 +2587,11 @@ static int kprobes_module_callback(struct notifier_block *nb,
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val == MODULE_STATE_COMING) {
		mutex_lock(&kprobe_mutex);
	guard(mutex)(&kprobe_mutex);

	if (val == MODULE_STATE_COMING)
		add_module_kprobe_blacklist(mod);
		mutex_unlock(&kprobe_mutex);
	}

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

@@ -2644,7 +2601,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
	 * notified, only '.init.text' section would be freed. We need to
	 * disable kprobes which have been inserted in the sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist)
@@ -2667,7 +2623,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
	}
	if (val == MODULE_STATE_GOING)
		remove_module_kprobe_blacklist(mod);
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

@@ -2695,7 +2650,7 @@ void kprobe_free_init_mem(void)
	struct kprobe *p;
	int i;

	mutex_lock(&kprobe_mutex);
	guard(mutex)(&kprobe_mutex);

	/* Kill all kprobes on initmem because the target code has been freed. */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -2705,8 +2660,6 @@ void kprobe_free_init_mem(void)
			kill_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
}

static int __init init_kprobes(void)
@@ -2902,11 +2855,11 @@ static int arm_all_kprobes(void)
	unsigned int i, total = 0, errors = 0;
	int err, ret = 0;

	mutex_lock(&kprobe_mutex);
	guard(mutex)(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;
		return 0;

	/*
	 * optimize_kprobe() called by arm_kprobe() checks
@@ -2936,8 +2889,6 @@ static int arm_all_kprobes(void)
	else
		pr_info("Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return ret;
}

@@ -2948,13 +2899,11 @@ static int disarm_all_kprobes(void)
	unsigned int i, total = 0, errors = 0;
	int err, ret = 0;

	mutex_lock(&kprobe_mutex);
	guard(mutex)(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
	if (kprobes_all_disarmed)
		return 0;
	}

	kprobes_all_disarmed = true;

@@ -2979,11 +2928,8 @@ static int disarm_all_kprobes(void)
	else
		pr_info("Kprobes globally disabled\n");

	mutex_unlock(&kprobe_mutex);

	/* Wait for disarming all kprobes by optimizer */
	wait_for_kprobe_optimizer();

	wait_for_kprobe_optimizer_locked();
	return ret;
}
@@ -650,8 +650,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
	struct ftrace_graph_ent trace;
	unsigned long bitmap = 0;
	int offset;
	int bit;
	int i;

	bit = ftrace_test_recursion_trylock(func, ret);
	if (bit < 0)
		return -EBUSY;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

@@ -697,12 +702,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
	 * flag, set that bit always.
	 */
	set_bitmap(current, offset, bitmap | BIT(0));

	ftrace_test_recursion_unlock(bit);
	return 0;
out_ret:
	current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
out:
	current->curr_ret_depth--;
	ftrace_test_recursion_unlock(bit);
	return -EBUSY;
}
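With this hunk the recursion protection that each architecture's entry code used to take (see the ftrace_graph_func and prepare_ftrace_return hunks at the top) moves into function_graph_enter() itself. The guard shape, as a standalone sketch with a hypothetical caller:

static int example_graph_entry(unsigned long func, unsigned long ret)
{
	int bit = ftrace_test_recursion_trylock(func, ret);

	if (bit < 0)			/* already inside the tracer: bail out */
		return -EBUSY;

	/* ... push the return-stack frame; errors unlock before returning ... */

	ftrace_test_recursion_unlock(bit);
	return 0;
}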
@@ -536,24 +536,21 @@ static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);
	guard(mutex)(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}
	if (unlikely(rec->counter == 0))
		return -EBUSY;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = div64_ul(rec->time, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
		return 0;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
@@ -590,10 +587,8 @@ static int function_stat_show(struct seq_file *m, void *v)
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
	return 0;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -789,27 +784,24 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);
	guard(preempt_notrace)();

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;
		return;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
			return;
	}

	rec->counter++;
out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
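Note the semantic shift in this hunk: the profiler handlers now run with preemption disabled via guard(preempt_notrace)() instead of interrupts disabled via local_irq_save(), and the automatic release removes the 'out:' label. The before/after shape, sketched with a hypothetical handler and type:

static void example_profile_hit(struct example_stat *stat)
{
	guard(preempt_notrace)();	/* replaces local_irq_save(flags) */

	if (!stat->enabled)
		return;			/* preemption re-enabled automatically */

	stat->hits++;			/* per-cpu data stays stable: no migration */
}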
@@ -856,19 +848,19 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
	unsigned long long calltime;
	unsigned long long rettime = trace_clock_local();
	struct ftrace_profile *rec;
	unsigned long flags;
	int size;

	local_irq_save(flags);
	guard(preempt_notrace)();

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;
		return;

	profile_data = fgraph_retrieve_data(gops->idx, &size);

	/* If the calltime was zero'd ignore it */
	if (!profile_data || !profile_data->calltime)
		goto out;
		return;

	calltime = rettime - profile_data->calltime;

@@ -896,9 +888,6 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

out:
	local_irq_restore(flags);
}

static struct fgraph_ops fprofiler_ops = {
@@ -946,20 +935,16 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	guard(mutex)(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			if (ret < 0)
				return ret;

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			if (ret < 0)
				return ret;
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
@@ -970,8 +955,6 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
			unregister_ftrace_profiler();
		}
	}
out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

@@ -1671,14 +1654,12 @@ unsigned long ftrace_location(unsigned long ip)
	loc = ftrace_location_range(ip, ip);
	if (!loc) {
		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
			goto out;
			return 0;

		/* map sym+0 to __fentry__ */
		if (!offset)
			loc = ftrace_location_range(ip, ip + size - 1);
	}

out:
	return loc;
}

@@ -2073,7 +2054,7 @@ rollback:
			continue;

		if (rec == end)
			goto err_out;
			return -EBUSY;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
@@ -2086,7 +2067,6 @@ rollback:
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}
@@ -5615,20 +5595,15 @@ static DEFINE_MUTEX(ftrace_cmd_mutex);
__init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	guard(mutex)(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
		if (strcmp(cmd->name, p->name) == 0)
			return -EBUSY;
	}
	list_add(&cmd->list, &ftrace_commands);
out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
	return 0;
}

/*
@@ -5638,20 +5613,17 @@ __init int register_ftrace_command(struct ftrace_func_command *cmd)
__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	guard(mutex)(&ftrace_cmd_mutex);

	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
			return 0;
		}
	}
out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
	return -ENODEV;
}

static int ftrace_process_regex(struct ftrace_iterator *iter,
@@ -5661,7 +5633,7 @@ static int ftrace_process_regex(struct ftrace_iterator *iter,
	struct trace_array *tr = iter->ops->private;
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;
	int ret;

	func = strsep(&next, ":");

@@ -5678,17 +5650,14 @@ static int ftrace_process_regex(struct ftrace_iterator *iter,

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(tr, hash, func, command, next, enable);
			goto out_unlock;
		}
	}
out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);
	guard(mutex)(&ftrace_cmd_mutex);

	return ret;
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0)
			return p->func(tr, hash, func, command, next, enable);
	}

	return -EINVAL;
}

static ssize_t
@@ -5722,12 +5691,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
				   parser->idx, enable);
		trace_parser_clear(parser);
		if (ret < 0)
			goto out;
			return ret;
	}

	ret = read;
out:
	return ret;
	return read;
}

ssize_t
@@ -8287,7 +8254,7 @@ pid_write(struct file *filp, const char __user *ubuf,
	if (!cnt)
		return 0;

	mutex_lock(&ftrace_lock);
	guard(mutex)(&ftrace_lock);

	switch (type) {
	case TRACE_PIDS:
@@ -8303,14 +8270,13 @@ pid_write(struct file *filp, const char __user *ubuf,
					     lockdep_is_held(&ftrace_lock));
		break;
	default:
		ret = -EINVAL;
		WARN_ON_ONCE(1);
		goto out;
		return -EINVAL;
	}

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;
		return ret;

	switch (type) {
	case TRACE_PIDS:
@@ -8339,11 +8305,8 @@ pid_write(struct file *filp, const char __user *ubuf,

	ftrace_update_pid_func();
	ftrace_startup_all(0);
out:
	mutex_unlock(&ftrace_lock);

	if (ret > 0)
		*ppos += ret;
	*ppos += ret;

	return ret;
}
@@ -8746,17 +8709,17 @@ static int
ftrace_enable_sysctl(const struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = -ENODEV;
	int ret;

	mutex_lock(&ftrace_lock);
	guard(mutex)(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;
		return -ENODEV;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;
		return ret;

	if (ftrace_enabled) {

@@ -8770,8 +8733,7 @@ ftrace_enable_sysctl(const struct ctl_table *table, int write,
	} else {
		if (is_permanent_ops_registered()) {
			ftrace_enabled = true;
			ret = -EBUSY;
			goto out;
			return -EBUSY;
		}

		/* stopping ftrace calls (just send to ftrace_stub) */
@@ -8781,9 +8743,7 @@ ftrace_enable_sysctl(const struct ctl_table *table, int write,
	}

	last_ftrace_enabled = !!ftrace_enabled;
out:
	mutex_unlock(&ftrace_lock);
	return ret;
	return 0;
}

static struct ctl_table ftrace_sysctls[] = {
@@ -25,30 +25,9 @@ menuconfig RV
	  For further information, see:
	    Documentation/trace/rv/runtime-verification.rst

config RV_MON_WIP
	depends on RV
	depends on PREEMPT_TRACER
	select DA_MON_EVENTS_IMPLICIT
	bool "wip monitor"
	help
	  Enable wip (wakeup in preemptive) sample monitor that illustrates
	  the usage of per-cpu monitors, and one limitation of the
	  preempt_disable/enable events.

	  For further information, see:
	    Documentation/trace/rv/monitor_wip.rst

config RV_MON_WWNR
	depends on RV
	select DA_MON_EVENTS_ID
	bool "wwnr monitor"
	help
	  Enable wwnr (wakeup while not running) sample monitor, this is a
	  sample monitor that illustrates the usage of per-task monitor.
	  The model is broken on purpose: it serves to test reactors.

	  For further information, see:
	    Documentation/trace/rv/monitor_wwnr.rst
source "kernel/trace/rv/monitors/wip/Kconfig"
source "kernel/trace/rv/monitors/wwnr/Kconfig"
# Add new monitors here

config RV_REACTORS
	bool "Runtime verification reactors"
@@ -1,8 +1,11 @@
# SPDX-License-Identifier: GPL-2.0

ccflags-y += -I $(src)		# needed for trace events

obj-$(CONFIG_RV) += rv.o
obj-$(CONFIG_RV_MON_WIP) += monitors/wip/wip.o
obj-$(CONFIG_RV_MON_WWNR) += monitors/wwnr/wwnr.o
# Add new monitors here
obj-$(CONFIG_RV_REACTORS) += rv_reactors.o
obj-$(CONFIG_RV_REACT_PRINTK) += reactor_printk.o
obj-$(CONFIG_RV_REACT_PANIC) += reactor_panic.o
12	kernel/trace/rv/monitors/wip/Kconfig (new file)
@@ -0,0 +1,12 @@
config RV_MON_WIP
	depends on RV
	depends on PREEMPT_TRACER
	select DA_MON_EVENTS_IMPLICIT
	bool "wip monitor"
	help
	  Enable wip (wakeup in preemptive) sample monitor that illustrates
	  the usage of per-cpu monitors, and one limitation of the
	  preempt_disable/enable events.

	  For further information, see:
	    Documentation/trace/rv/monitor_wip.rst
@@ -10,7 +10,7 @@

#define MODULE_NAME "wip"

#include <trace/events/rv.h>
#include <rv_trace.h>
#include <trace/events/sched.h>
#include <trace/events/preemptirq.h>

15	kernel/trace/rv/monitors/wip/wip_trace.h (new file)
@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Snippet to be included in rv_trace.h
 */

#ifdef CONFIG_RV_MON_WIP
DEFINE_EVENT(event_da_monitor, event_wip,
	     TP_PROTO(char *state, char *event, char *next_state, bool final_state),
	     TP_ARGS(state, event, next_state, final_state));

DEFINE_EVENT(error_da_monitor, error_wip,
	     TP_PROTO(char *state, char *event),
	     TP_ARGS(state, event));
#endif /* CONFIG_RV_MON_WIP */
11	kernel/trace/rv/monitors/wwnr/Kconfig (new file)
@@ -0,0 +1,11 @@
config RV_MON_WWNR
	depends on RV
	select DA_MON_EVENTS_ID
	bool "wwnr monitor"
	help
	  Enable wwnr (wakeup while not running) sample monitor, this is a
	  sample monitor that illustrates the usage of per-task monitor.
	  The model is broken on purpose: it serves to test reactors.

	  For further information, see:
	    Documentation/trace/rv/monitor_wwnr.rst
@@ -10,7 +10,7 @@

#define MODULE_NAME "wwnr"

#include <trace/events/rv.h>
#include <rv_trace.h>
#include <trace/events/sched.h>

#include "wwnr.h"
16	kernel/trace/rv/monitors/wwnr/wwnr_trace.h (new file)
@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Snippet to be included in rv_trace.h
 */

#ifdef CONFIG_RV_MON_WWNR
/* id is the pid of the task */
DEFINE_EVENT(event_da_monitor_id, event_wwnr,
	     TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
	     TP_ARGS(id, state, event, next_state, final_state));

DEFINE_EVENT(error_da_monitor_id, error_wwnr,
	     TP_PROTO(int id, char *state, char *event),
	     TP_ARGS(id, state, event));
#endif /* CONFIG_RV_MON_WWNR */
@@ -145,7 +145,7 @@

#ifdef CONFIG_DA_MON_EVENTS
#define CREATE_TRACE_POINTS
#include <trace/events/rv.h>
#include <rv_trace.h>
#endif

#include "rv.h"
@@ -57,15 +57,9 @@ DECLARE_EVENT_CLASS(error_da_monitor,
		__entry->state)
);

#ifdef CONFIG_RV_MON_WIP
DEFINE_EVENT(event_da_monitor, event_wip,
	     TP_PROTO(char *state, char *event, char *next_state, bool final_state),
	     TP_ARGS(state, event, next_state, final_state));
#include <monitors/wip/wip_trace.h>
// Add new monitors based on CONFIG_DA_MON_EVENTS_IMPLICIT here

DEFINE_EVENT(error_da_monitor, error_wip,
	     TP_PROTO(char *state, char *event),
	     TP_ARGS(state, event));
#endif /* CONFIG_RV_MON_WIP */
#endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */

#ifdef CONFIG_DA_MON_EVENTS_ID
@@ -123,20 +117,14 @@ DECLARE_EVENT_CLASS(error_da_monitor_id,
		__entry->state)
);

#ifdef CONFIG_RV_MON_WWNR
/* id is the pid of the task */
DEFINE_EVENT(event_da_monitor_id, event_wwnr,
	     TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
	     TP_ARGS(id, state, event, next_state, final_state));

DEFINE_EVENT(error_da_monitor_id, error_wwnr,
	     TP_PROTO(int id, char *state, char *event),
	     TP_ARGS(id, state, event));
#endif /* CONFIG_RV_MON_WWNR */
#include <monitors/wwnr/wwnr_trace.h>
// Add new monitors based on CONFIG_DA_MON_EVENTS_ID here

#endif /* CONFIG_DA_MON_EVENTS_ID */
#endif /* _TRACE_RV_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE rv_trace
#include <trace/define_trace.h>
@ -26,6 +26,7 @@
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/cleanup.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
@ -535,19 +536,16 @@ LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr)
{
struct trace_array *tr;
int ret = -ENODEV;

mutex_lock(&trace_types_lock);
guard(mutex)(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (tr == this_tr) {
tr->ref++;
ret = 0;
break;
return 0;
}
}
mutex_unlock(&trace_types_lock);

return ret;
return -ENODEV;
}

static void __trace_array_put(struct trace_array *this_tr)
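
The conversion above is the pattern repeated throughout this merge: open-coded mutex_lock()/mutex_unlock() pairs become guard(mutex)(...), which releases the lock automatically when the enclosing scope ends, so the function may return directly from inside the loop. A minimal userspace sketch of the mechanism, assuming only GCC/Clang's cleanup attribute (the SKETCH_* names are illustrative, not the kernel's <linux/cleanup.h> machinery):

#include <pthread.h>

static void sketch_unlock(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

#define SKETCH_GUARD(m) \
	pthread_mutex_t *__guard __attribute__((cleanup(sketch_unlock))) = \
		(pthread_mutex_lock(m), (m))

/* early returns no longer leak the lock */
static int find_slot(pthread_mutex_t *lock, const int *tbl, int n, int want)
{
	SKETCH_GUARD(lock);

	for (int i = 0; i < n; i++)
		if (tbl[i] == want)
			return i;	/* cleanup runs here: lock released */
	return -1;			/* and here */
}

The design payoff is visible in trace_array_get() above: the error paths collapse into plain return statements, and the out: label disappears.
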
@ -1443,22 +1441,20 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
cond_update_fn_t update)
{
struct cond_snapshot *cond_snapshot;
int ret = 0;
struct cond_snapshot *cond_snapshot __free(kfree) =
kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
int ret;

cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
if (!cond_snapshot)
return -ENOMEM;

cond_snapshot->cond_data = cond_data;
cond_snapshot->update = update;

mutex_lock(&trace_types_lock);
guard(mutex)(&trace_types_lock);

if (tr->current_trace->use_max_tr) {
ret = -EBUSY;
goto fail_unlock;
}
if (tr->current_trace->use_max_tr)
return -EBUSY;

/*
* The cond_snapshot can only change to NULL without the
@ -1468,29 +1464,20 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
* do safely with only holding the trace_types_lock and not
* having to take the max_lock.
*/
if (tr->cond_snapshot) {
ret = -EBUSY;
goto fail_unlock;
}
if (tr->cond_snapshot)
return -EBUSY;

ret = tracing_arm_snapshot_locked(tr);
if (ret)
goto fail_unlock;
return ret;

local_irq_disable();
arch_spin_lock(&tr->max_lock);
tr->cond_snapshot = cond_snapshot;
tr->cond_snapshot = no_free_ptr(cond_snapshot);
arch_spin_unlock(&tr->max_lock);
local_irq_enable();

mutex_unlock(&trace_types_lock);

return ret;

fail_unlock:
mutex_unlock(&trace_types_lock);
kfree(cond_snapshot);
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);

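tracing_snapshot_cond_enable() pairs guard() with the other half of the cleanup API: the allocation carries __free(kfree), so every early return frees it, and ownership is handed out with no_free_ptr() only on the success path. A rough userspace analogue of those two primitives (names modeled on, but not taken from, <linux/cleanup.h>; this is a sketch, not the kernel implementation):

#include <stdlib.h>

static void auto_free(void *pp)
{
	free(*(void **)pp);
}

#define __free_on_exit __attribute__((cleanup(auto_free)))

/* analogue of no_free_ptr(): steal the value, disarm the cleanup */
static void *take_ptr(void **pp)
{
	void *p = *pp;

	*pp = NULL;		/* cleanup now frees NULL, a no-op */
	return p;
}

struct cond_demo { void *cond_data; };
static struct cond_demo *installed;

static int demo_install(void *cond_data)
{
	struct cond_demo *cs __free_on_exit = calloc(1, sizeof(*cs));

	if (!cs)
		return -1;	/* nothing allocated, nothing to free */
	cs->cond_data = cond_data;
	if (installed)
		return -2;	/* cs freed automatically here */
	installed = take_ptr((void **)&cs);	/* success: keep it alive */
	return 0;
}
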
@ -2203,10 +2190,10 @@ static __init int init_trace_selftests(void)

selftests_can_run = true;

mutex_lock(&trace_types_lock);
guard(mutex)(&trace_types_lock);

if (list_empty(&postponed_selftests))
goto out;
return 0;

pr_info("Running postponed tracer tests:\n");

@ -2235,9 +2222,6 @@ static __init int init_trace_selftests(void)
}
tracing_selftest_running = false;

out:
mutex_unlock(&trace_types_lock);

return 0;
}
core_initcall(init_trace_selftests);
@ -2807,7 +2791,7 @@ int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
int save_tracepoint_printk;
int ret;

mutex_lock(&tracepoint_printk_mutex);
guard(mutex)(&tracepoint_printk_mutex);
save_tracepoint_printk = tracepoint_printk;

ret = proc_dointvec(table, write, buffer, lenp, ppos);
@ -2820,16 +2804,13 @@ int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
tracepoint_printk = 0;

if (save_tracepoint_printk == tracepoint_printk)
goto out;
return ret;

if (tracepoint_printk)
static_key_enable(&tracepoint_printk_key.key);
else
static_key_disable(&tracepoint_printk_key.key);

out:
mutex_unlock(&tracepoint_printk_mutex);

return ret;
}

@ -5126,7 +5107,8 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
u32 tracer_flags;
int i;

mutex_lock(&trace_types_lock);
guard(mutex)(&trace_types_lock);

tracer_flags = tr->current_trace->flags->val;
trace_opts = tr->current_trace->flags->opts;

@ -5143,7 +5125,6 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
else
seq_printf(m, "no%s\n", trace_opts[i].name);
}
mutex_unlock(&trace_types_lock);

return 0;
}
@ -5808,7 +5789,7 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
return;
}

mutex_lock(&trace_eval_mutex);
guard(mutex)(&trace_eval_mutex);

if (!trace_eval_maps)
trace_eval_maps = map_array;
@ -5832,8 +5813,6 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
map_array++;
}
memset(map_array, 0, sizeof(*map_array));

mutex_unlock(&trace_eval_mutex);
}

static void trace_create_eval_file(struct dentry *d_tracer)
@ -5997,23 +5976,18 @@ ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
{
int ret;

mutex_lock(&trace_types_lock);
guard(mutex)(&trace_types_lock);

if (cpu_id != RING_BUFFER_ALL_CPUS) {
/* make sure, this cpu is enabled in the mask */
if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
ret = -EINVAL;
goto out;
}
if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask))
return -EINVAL;
}

ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
if (ret < 0)
ret = -ENOMEM;

out:
mutex_unlock(&trace_types_lock);

return ret;
}

@ -6105,9 +6079,9 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
#ifdef CONFIG_TRACER_MAX_TRACE
bool had_max_tr;
#endif
int ret = 0;
int ret;

mutex_lock(&trace_types_lock);
guard(mutex)(&trace_types_lock);

update_last_data(tr);

@ -6115,7 +6089,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
RING_BUFFER_ALL_CPUS);
if (ret < 0)
goto out;
return ret;
ret = 0;
}

@ -6123,43 +6097,37 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (strcmp(t->name, buf) == 0)
break;
}
if (!t) {
ret = -EINVAL;
goto out;
}
if (!t)
return -EINVAL;

if (t == tr->current_trace)
goto out;
return 0;

#ifdef CONFIG_TRACER_SNAPSHOT
if (t->use_max_tr) {
local_irq_disable();
arch_spin_lock(&tr->max_lock);
if (tr->cond_snapshot)
ret = -EBUSY;
ret = tr->cond_snapshot ? -EBUSY : 0;
arch_spin_unlock(&tr->max_lock);
local_irq_enable();
if (ret)
goto out;
return ret;
}
#endif
/* Some tracers won't work on kernel command line */
if (system_state < SYSTEM_RUNNING && t->noboot) {
pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
t->name);
goto out;
return -EINVAL;
}

/* Some tracers are only allowed for the top level buffer */
if (!trace_ok_for_array(t, tr)) {
ret = -EINVAL;
goto out;
}
if (!trace_ok_for_array(t, tr))
return -EINVAL;

/* If trace pipe files are being read, we can't change the tracer */
if (tr->trace_ref) {
ret = -EBUSY;
goto out;
}
if (tr->trace_ref)
return -EBUSY;

trace_branch_disable();

@ -6190,7 +6158,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (!had_max_tr && t->use_max_tr) {
ret = tracing_arm_snapshot_locked(tr);
if (ret)
goto out;
return ret;
}
#else
tr->current_trace = &nop_trace;
@ -6203,17 +6171,15 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (t->use_max_tr)
tracing_disarm_snapshot(tr);
#endif
goto out;
return ret;
}
}

tr->current_trace = t;
tr->current_trace->enabled++;
trace_branch_enable(tr);
out:
mutex_unlock(&trace_types_lock);

return ret;
return 0;
}

static ssize_t
@ -6291,22 +6257,18 @@ tracing_thresh_write(struct file *filp, const char __user *ubuf,
struct trace_array *tr = filp->private_data;
int ret;

mutex_lock(&trace_types_lock);
guard(mutex)(&trace_types_lock);
ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
if (ret < 0)
goto out;
return ret;

if (tr->current_trace->update_thresh) {
ret = tr->current_trace->update_thresh(tr);
if (ret < 0)
goto out;
return ret;
}

ret = cnt;
out:
mutex_unlock(&trace_types_lock);

return ret;
return cnt;
}

#ifdef CONFIG_TRACER_MAX_TRACE
@ -6525,31 +6487,29 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
* This is just a matter of traces coherency, the ring buffer itself
* is protected.
*/
mutex_lock(&iter->mutex);
guard(mutex)(&iter->mutex);

/* return any leftover data */
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
if (sret != -EBUSY)
goto out;
return sret;

trace_seq_init(&iter->seq);

if (iter->trace->read) {
sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
if (sret)
goto out;
return sret;
}

waitagain:
sret = tracing_wait_pipe(filp);
if (sret <= 0)
goto out;
return sret;

/* stop when tracing is finished */
if (trace_empty(iter)) {
sret = 0;
goto out;
}
if (trace_empty(iter))
return 0;

if (cnt >= TRACE_SEQ_BUFFER_SIZE)
cnt = TRACE_SEQ_BUFFER_SIZE - 1;
@ -6613,9 +6573,6 @@ waitagain:
if (sret == -EBUSY)
goto waitagain;

out:
mutex_unlock(&iter->mutex);

return sret;
}

@ -7207,25 +7164,19 @@ u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_eve
*/
int tracing_set_filter_buffering(struct trace_array *tr, bool set)
{
int ret = 0;

mutex_lock(&trace_types_lock);
guard(mutex)(&trace_types_lock);

if (set && tr->no_filter_buffering_ref++)
goto out;
return 0;

if (!set) {
if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
ret = -EINVAL;
goto out;
}
if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
return -EINVAL;

--tr->no_filter_buffering_ref;
}
out:
mutex_unlock(&trace_types_lock);

return ret;
return 0;
}

struct ftrace_buffer_info {
@ -7301,12 +7252,10 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (ret)
return ret;

mutex_lock(&trace_types_lock);
guard(mutex)(&trace_types_lock);

if (tr->current_trace->use_max_tr) {
ret = -EBUSY;
goto out;
}
if (tr->current_trace->use_max_tr)
return -EBUSY;

local_irq_disable();
arch_spin_lock(&tr->max_lock);
@ -7315,24 +7264,20 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
arch_spin_unlock(&tr->max_lock);
local_irq_enable();
if (ret)
goto out;
return ret;

switch (val) {
case 0:
if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
ret = -EINVAL;
break;
}
if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
return -EINVAL;
if (tr->allocated_snapshot)
free_snapshot(tr);
break;
case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
ret = -EINVAL;
break;
}
if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
return -EINVAL;
#endif
if (tr->allocated_snapshot)
ret = resize_buffer_duplicate_size(&tr->max_buffer,
@ -7340,7 +7285,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,

ret = tracing_arm_snapshot_locked(tr);
if (ret)
break;
return ret;

/* Now, we're going to swap */
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
@ -7367,8 +7312,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
*ppos += cnt;
ret = cnt;
}
out:
mutex_unlock(&trace_types_lock);

return ret;
}

@ -7754,12 +7698,11 @@ void tracing_log_err(struct trace_array *tr,

len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;

mutex_lock(&tracing_err_log_lock);
guard(mutex)(&tracing_err_log_lock);

err = get_tracing_log_err(tr, len);
if (PTR_ERR(err) == -ENOMEM) {
mutex_unlock(&tracing_err_log_lock);
if (PTR_ERR(err) == -ENOMEM)
return;
}

snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
@ -7770,7 +7713,6 @@ void tracing_log_err(struct trace_array *tr,
err->info.ts = local_clock();

list_add_tail(&err->list, &tr->err_log);
mutex_unlock(&tracing_err_log_lock);
}

static void clear_tracing_err_log(struct trace_array *tr)
@ -9514,20 +9456,17 @@ static int instance_mkdir(const char *name)
struct trace_array *tr;
int ret;

mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);
guard(mutex)(&event_mutex);
guard(mutex)(&trace_types_lock);

ret = -EEXIST;
if (trace_array_find(name))
goto out_unlock;
return -EEXIST;

tr = trace_array_create(name);

ret = PTR_ERR_OR_ZERO(tr);

out_unlock:
mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
return ret;
}

@ -9577,24 +9516,23 @@ struct trace_array *trace_array_get_by_name(const char *name, const char *system
{
struct trace_array *tr;

mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);
guard(mutex)(&event_mutex);
guard(mutex)(&trace_types_lock);

list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (tr->name && strcmp(tr->name, name) == 0)
goto out_unlock;
if (tr->name && strcmp(tr->name, name) == 0) {
tr->ref++;
return tr;
}
}

tr = trace_array_create_systems(name, systems, 0, 0);

if (IS_ERR(tr))
tr = NULL;
out_unlock:
if (tr)
else
tr->ref++;

mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
return tr;
}
EXPORT_SYMBOL_GPL(trace_array_get_by_name);
@ -9645,48 +9583,36 @@ static int __remove_instance(struct trace_array *tr)
int trace_array_destroy(struct trace_array *this_tr)
{
struct trace_array *tr;
int ret;

if (!this_tr)
return -EINVAL;

mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);
guard(mutex)(&event_mutex);
guard(mutex)(&trace_types_lock);

ret = -ENODEV;

/* Making sure trace array exists before destroying it. */
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (tr == this_tr) {
ret = __remove_instance(tr);
break;
}
if (tr == this_tr)
return __remove_instance(tr);
}

mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);

return ret;
return -ENODEV;
}
EXPORT_SYMBOL_GPL(trace_array_destroy);

static int instance_rmdir(const char *name)
{
struct trace_array *tr;
int ret;

mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);
guard(mutex)(&event_mutex);
guard(mutex)(&trace_types_lock);

ret = -ENODEV;
tr = trace_array_find(name);
if (tr)
ret = __remove_instance(tr);
if (!tr)
return -ENODEV;

mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);

return ret;
return __remove_instance(tr);
}

static __init void create_trace_instances(struct dentry *d_tracer)
@ -9699,19 +9625,16 @@ static __init void create_trace_instances(struct dentry *d_tracer)
if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
return;

mutex_lock(&event_mutex);
mutex_lock(&trace_types_lock);
guard(mutex)(&event_mutex);
guard(mutex)(&trace_types_lock);

list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (!tr->name)
continue;
if (MEM_FAIL(trace_array_create_dir(tr) < 0,
"Failed to create instance directory\n"))
break;
return;
}

mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
}

static void
@ -9925,7 +9848,7 @@ static void trace_module_remove_evals(struct module *mod)
if (!mod->num_trace_evals)
return;

mutex_lock(&trace_eval_mutex);
guard(mutex)(&trace_eval_mutex);

map = trace_eval_maps;

@ -9937,12 +9860,10 @@ static void trace_module_remove_evals(struct module *mod)
map = map->tail.next;
}
if (!map)
goto out;
return;

*last = trace_eval_jmp_to_tail(map)->tail.next;
kfree(map);
out:
mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
|
@ -74,24 +74,19 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
struct dyn_event *pos, *n;
char *system = NULL, *event, *p;
int argc, ret = -ENOENT;
char **argv;
char **argv __free(argv_free) = argv_split(GFP_KERNEL, raw_command, &argc);

argv = argv_split(GFP_KERNEL, raw_command, &argc);
if (!argv)
return -ENOMEM;

if (argv[0][0] == '-') {
if (argv[0][1] != ':') {
ret = -EINVAL;
goto out;
}
if (argv[0][1] != ':')
return -EINVAL;
event = &argv[0][2];
} else {
event = strchr(argv[0], ':');
if (!event) {
ret = -EINVAL;
goto out;
}
if (!event)
return -EINVAL;
event++;
}

@ -101,10 +96,8 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
event = p + 1;
*p = '\0';
}
if (!system && event[0] == '\0') {
ret = -EINVAL;
goto out;
}
if (!system && event[0] == '\0')
return -EINVAL;

mutex_lock(&event_mutex);
for_each_dyn_event_safe(pos, n) {
@ -120,8 +113,6 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
}
tracing_reset_all_online_cpus();
mutex_unlock(&event_mutex);
out:
argv_free(argv);
return ret;
}

|
@ -917,10 +917,10 @@ static int __trace_eprobe_create(int argc, const char *argv[])
goto error;
}

mutex_lock(&event_mutex);
event_call = find_and_get_event(sys_name, sys_event);
ep = alloc_event_probe(group, event, event_call, argc - 2);
mutex_unlock(&event_mutex);
scoped_guard(mutex, &event_mutex) {
event_call = find_and_get_event(sys_name, sys_event);
ep = alloc_event_probe(group, event, event_call, argc - 2);
}

if (IS_ERR(ep)) {
ret = PTR_ERR(ep);
@ -952,23 +952,21 @@ static int __trace_eprobe_create(int argc, const char *argv[])
if (ret < 0)
goto error;
init_trace_eprobe_call(ep);
mutex_lock(&event_mutex);
ret = trace_probe_register_event_call(&ep->tp);
if (ret) {
if (ret == -EEXIST) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, EVENT_EXIST);
scoped_guard(mutex, &event_mutex) {
ret = trace_probe_register_event_call(&ep->tp);
if (ret) {
if (ret == -EEXIST) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, EVENT_EXIST);
}
goto error;
}
ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
if (ret < 0) {
trace_probe_unregister_event_call(&ep->tp);
goto error;
}
mutex_unlock(&event_mutex);
goto error;
}
ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
if (ret < 0) {
trace_probe_unregister_event_call(&ep->tp);
mutex_unlock(&event_mutex);
goto error;
}
mutex_unlock(&event_mutex);
return ret;
parse_error:
ret = -EINVAL;
|
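__trace_eprobe_create() uses scoped_guard(), the block-scoped sibling of guard(): the mutex is held only for the statement block that follows, not to the end of the function. The construct is roughly a one-iteration for loop whose loop variable carries the cleanup; an illustrative userspace sketch under that assumption, with pthreads standing in for the kernel mutex:

#include <pthread.h>

static void scope_unlock(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* runs the following block exactly once, lock held throughout */
#define SCOPED_GUARD(m) \
	for (pthread_mutex_t *__scope \
		__attribute__((cleanup(scope_unlock))) = \
		(pthread_mutex_lock(m), (m)); \
	     __scope; __scope = NULL)

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_counter;

static int bump(void)
{
	int snapshot;

	SCOPED_GUARD(&demo_lock) {
		shared_counter++;	/* lock held only inside this block */
		snapshot = shared_counter;
	}
	return snapshot;		/* lock already dropped here */
}
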
@ -1558,21 +1558,20 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (ret)
return ret;

guard(mutex)(&event_mutex);

switch (val) {
case 0:
case 1:
ret = -ENODEV;
mutex_lock(&event_mutex);
file = event_file_file(filp);
if (likely(file)) {
ret = tracing_update_buffers(file->tr);
if (ret < 0) {
mutex_unlock(&event_mutex);
return ret;
}
ret = ftrace_event_enable_disable(file, val);
}
mutex_unlock(&event_mutex);
if (!file)
return -ENODEV;
ret = tracing_update_buffers(file->tr);
if (ret < 0)
return ret;
ret = ftrace_event_enable_disable(file, val);
if (ret < 0)
return ret;
break;

default:
@ -1581,7 +1580,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,

*ppos += cnt;

return ret ? ret : cnt;
return cnt;
}

static ssize_t
@ -2157,7 +2156,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
if (ret < 0)
return ret;

mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);

if (type == TRACE_PIDS) {
filtered_pids = rcu_dereference_protected(tr->filtered_pids,
@ -2173,7 +2172,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,

ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
if (ret < 0)
goto out;
return ret;

if (type == TRACE_PIDS)
rcu_assign_pointer(tr->filtered_pids, pid_list);
@ -2198,11 +2197,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
*/
on_each_cpu(ignore_task_cpu, tr, 1);

out:
mutex_unlock(&event_mutex);

if (ret > 0)
*ppos += ret;
*ppos += ret;

return ret;
}
@ -3111,6 +3106,20 @@ static bool event_in_systems(struct trace_event_call *call,
return !*p || isspace(*p) || *p == ',';
}

#ifdef CONFIG_HIST_TRIGGERS
/*
* Wake up waiter on the hist_poll_wq from irq_work because the hist trigger
* may happen in any context.
*/
static void hist_poll_event_irq_work(struct irq_work *work)
{
wake_up_all(&hist_poll_wq);
}

DEFINE_IRQ_WORK(hist_poll_work, hist_poll_event_irq_work);
DECLARE_WAIT_QUEUE_HEAD(hist_poll_wq);
#endif

static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call,
struct trace_array *tr)
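
The new hist_poll_event_irq_work()/hist_poll_work pair exists because, per the comment above, a hist trigger can fire in any context, including ones where waking a wait queue directly is unsafe; the trigger path defers the wakeup through irq_work, and the callback runs soon after in IRQ context where wake_up_all() is fine. A minimal module-style sketch of that deferral pattern (the demo_* names are invented for illustration; the APIs are the real <linux/irq_work.h> and <linux/wait.h> ones):

#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

/* runs in IRQ context, where wake_up_all() is safe */
static void demo_wakeup(struct irq_work *work)
{
	wake_up_all(&demo_wq);
}
static DEFINE_IRQ_WORK(demo_work, demo_wakeup);

/* may be called from any context, even NMI */
static void demo_event(void)
{
	irq_work_queue(&demo_work);
}

static int __init demo_init(void)
{
	demo_event();
	return 0;
}
module_init(demo_init);
MODULE_LICENSE("GPL");
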
@ -3269,13 +3278,13 @@ int trace_add_event_call(struct trace_event_call *call)
int ret;
lockdep_assert_held(&event_mutex);

mutex_lock(&trace_types_lock);
guard(mutex)(&trace_types_lock);

ret = __register_event(call, NULL);
if (ret >= 0)
__add_event_to_tracers(call);
if (ret < 0)
return ret;

mutex_unlock(&trace_types_lock);
__add_event_to_tracers(call);
return ret;
}
EXPORT_SYMBOL_GPL(trace_add_event_call);
@ -3529,30 +3538,21 @@ struct trace_event_file *trace_get_event_file(const char *instance,
return ERR_PTR(ret);
}

mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);

file = find_event_file(tr, system, event);
if (!file) {
trace_array_put(tr);
ret = -EINVAL;
goto out;
return ERR_PTR(-EINVAL);
}

/* Don't let event modules unload while in use */
ret = trace_event_try_get_ref(file->event_call);
if (!ret) {
trace_array_put(tr);
ret = -EBUSY;
goto out;
return ERR_PTR(-EBUSY);
}

ret = 0;
out:
mutex_unlock(&event_mutex);

if (ret)
file = ERR_PTR(ret);

return file;
}
EXPORT_SYMBOL_GPL(trace_get_event_file);
@ -3770,6 +3770,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
struct trace_event_file *file;
struct ftrace_probe_ops *ops;
struct event_probe_data *data;
unsigned long count = -1;
const char *system;
const char *event;
char *number;
@ -3789,12 +3790,11 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,

event = strsep(&param, ":");

mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);

ret = -EINVAL;
file = find_event_file(tr, system, event);
if (!file)
goto out;
return -EINVAL;

enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;

@ -3803,74 +3803,62 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
else
ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;

if (glob[0] == '!') {
ret = unregister_ftrace_function_probe_func(glob+1, tr, ops);
goto out;
if (glob[0] == '!')
return unregister_ftrace_function_probe_func(glob+1, tr, ops);

if (param) {
number = strsep(&param, ":");

if (!strlen(number))
return -EINVAL;

/*
* We use the callback data field (which is a pointer)
* as our counter.
*/
ret = kstrtoul(number, 0, &count);
if (ret)
return ret;
}

ret = -ENOMEM;

data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
goto out;

data->enable = enable;
data->count = -1;
data->file = file;

if (!param)
goto out_reg;

number = strsep(&param, ":");

ret = -EINVAL;
if (!strlen(number))
goto out_free;

/*
* We use the callback data field (which is a pointer)
* as our counter.
*/
ret = kstrtoul(number, 0, &data->count);
if (ret)
goto out_free;

out_reg:
/* Don't let event modules unload while probe registered */
ret = trace_event_try_get_ref(file->event_call);
if (!ret) {
ret = -EBUSY;
goto out_free;
}
if (!ret)
return -EBUSY;

ret = __ftrace_event_enable_disable(file, 1, 1);
if (ret < 0)
goto out_put;

ret = -ENOMEM;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
goto out_put;

data->enable = enable;
data->count = count;
data->file = file;

ret = register_ftrace_function_probe(glob, tr, ops, data);
/*
* The above returns on success the # of functions enabled,
* but if it didn't find any functions it returns zero.
* Consider no functions a failure too.
*/
if (!ret) {
ret = -ENOENT;
goto out_disable;
} else if (ret < 0)
goto out_disable;
/* Just return zero, not the number of enabled functions */
ret = 0;
out:
mutex_unlock(&event_mutex);
return ret;

out_disable:
/* Just return zero, not the number of enabled functions */
if (ret > 0)
return 0;

kfree(data);

if (!ret)
ret = -ENOENT;

__ftrace_event_enable_disable(file, 0, 1);
out_put:
trace_event_put_ref(file->event_call);
out_free:
kfree(data);
goto out;
return ret;
}

static struct ftrace_func_command event_enable_cmd = {
@ -4093,20 +4081,17 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{
int ret;

mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);

ret = create_event_toplevel_files(parent, tr);
if (ret)
goto out_unlock;
return ret;

down_write(&trace_event_sem);
__trace_early_add_event_dirs(tr);
up_write(&trace_event_sem);

out_unlock:
mutex_unlock(&event_mutex);

return ret;
return 0;
}

/* Must be called with event_mutex held */
|
@ -2405,13 +2405,11 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
struct event_filter *filter = NULL;
int err = 0;

mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);

/* Make sure the system still has events */
if (!dir->nr_events) {
err = -ENODEV;
goto out_unlock;
}
if (!dir->nr_events)
return -ENODEV;

if (!strcmp(strstrip(filter_string), "0")) {
filter_free_subsystem_preds(dir, tr);
@ -2422,7 +2420,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
tracepoint_synchronize_unregister();
filter_free_subsystem_filters(dir, tr);
__free_filter(filter);
goto out_unlock;
return 0;
}

err = create_system_filter(dir, filter_string, &filter);
@ -2434,8 +2432,6 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
__free_filter(system->filter);
system->filter = filter;
}
out_unlock:
mutex_unlock(&event_mutex);

return err;
}
@ -2612,17 +2608,15 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
struct event_filter *filter = NULL;
struct trace_event_call *call;

mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);

call = event->tp_event;

err = -EINVAL;
if (!call)
goto out_unlock;
return -EINVAL;

err = -EEXIST;
if (event->filter)
goto out_unlock;
return -EEXIST;

err = create_filter(NULL, call, filter_str, false, &filter);
if (err)
@ -2637,9 +2631,6 @@ free_filter:
if (err || ftrace_event_is_function(call))
__free_filter(filter);

out_unlock:
mutex_unlock(&event_mutex);

return err;
}

|
@ -5311,6 +5311,8 @@ static void event_hist_trigger(struct event_trigger_data *data,

if (resolve_var_refs(hist_data, key, var_ref_vals, true))
hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals);

hist_poll_wakeup();
}

static void hist_trigger_stacktrace_print(struct seq_file *m,
@ -5590,49 +5592,128 @@ static void hist_trigger_show(struct seq_file *m,
n_entries, (u64)atomic64_read(&hist_data->map->drops));
}

struct hist_file_data {
struct file *file;
u64 last_read;
u64 last_act;
};

static u64 get_hist_hit_count(struct trace_event_file *event_file)
{
struct hist_trigger_data *hist_data;
struct event_trigger_data *data;
u64 ret = 0;

list_for_each_entry(data, &event_file->triggers, list) {
if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) {
hist_data = data->private_data;
ret += atomic64_read(&hist_data->map->hits);
}
}
return ret;
}

static int hist_show(struct seq_file *m, void *v)
{
struct hist_file_data *hist_file = m->private;
struct event_trigger_data *data;
struct trace_event_file *event_file;
int n = 0, ret = 0;
int n = 0;

mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);

event_file = event_file_file(m->private);
if (unlikely(!event_file)) {
ret = -ENODEV;
goto out_unlock;
}
event_file = event_file_file(hist_file->file);
if (unlikely(!event_file))
return -ENODEV;

list_for_each_entry(data, &event_file->triggers, list) {
if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
hist_trigger_show(m, data, n++);
}
hist_file->last_read = get_hist_hit_count(event_file);
/*
* Update last_act too so that poll()/POLLPRI can wait for the next
* event after any syscall on hist file.
*/
hist_file->last_act = hist_file->last_read;

out_unlock:
mutex_unlock(&event_mutex);
return 0;
}

static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wait)
{
struct trace_event_file *event_file;
struct seq_file *m = file->private_data;
struct hist_file_data *hist_file = m->private;
__poll_t ret = 0;
u64 cnt;

guard(mutex)(&event_mutex);

event_file = event_file_data(file);
if (!event_file)
return EPOLLERR;

hist_poll_wait(file, wait);

cnt = get_hist_hit_count(event_file);
if (hist_file->last_read != cnt)
ret |= EPOLLIN | EPOLLRDNORM;
if (hist_file->last_act != cnt) {
hist_file->last_act = cnt;
ret |= EPOLLPRI;
}

return ret;
}

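With event_hist_poll() wired up, userspace can block on a hist file instead of re-reading it in a loop: EPOLLIN reports hits not yet consumed by read(), and EPOLLPRI reports any new hit since the last poll. A small consumer sketch (the event path below is just an example; any event with a hist trigger attached works):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/tracing/events/sched/sched_switch/hist";
	struct pollfd pfd;

	pfd.fd = open(path, O_RDONLY);
	if (pfd.fd < 0) {
		perror("open");
		return 1;
	}
	pfd.events = POLLPRI;	/* wake on the next histogram update */

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
		puts("histogram updated");
	close(pfd.fd);
	return 0;
}
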
static int event_hist_release(struct inode *inode, struct file *file)
{
struct seq_file *m = file->private_data;
struct hist_file_data *hist_file = m->private;

kfree(hist_file);
return tracing_single_release_file_tr(inode, file);
}

static int event_hist_open(struct inode *inode, struct file *file)
{
struct trace_event_file *event_file;
struct hist_file_data *hist_file;
int ret;

ret = tracing_open_file_tr(inode, file);
if (ret)
return ret;

guard(mutex)(&event_mutex);

event_file = event_file_data(file);
if (!event_file)
return -ENODEV;

hist_file = kzalloc(sizeof(*hist_file), GFP_KERNEL);
if (!hist_file)
return -ENOMEM;

hist_file->file = file;
hist_file->last_act = get_hist_hit_count(event_file);

/* Clear private_data to avoid warning in single_open() */
file->private_data = NULL;
return single_open(file, hist_show, file);
ret = single_open(file, hist_show, hist_file);
if (ret)
kfree(hist_file);

return ret;
}

const struct file_operations event_hist_fops = {
.open = event_hist_open,
.read = seq_read,
.llseek = seq_lseek,
.release = tracing_single_release_file_tr,
.release = event_hist_release,
.poll = event_hist_poll,
};

#ifdef CONFIG_HIST_TRIGGERS_DEBUG
@ -5873,25 +5954,19 @@ static int hist_debug_show(struct seq_file *m, void *v)
{
struct event_trigger_data *data;
struct trace_event_file *event_file;
int n = 0, ret = 0;
int n = 0;

mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);

event_file = event_file_file(m->private);
if (unlikely(!event_file)) {
ret = -ENODEV;
goto out_unlock;
}
if (unlikely(!event_file))
return -ENODEV;

list_for_each_entry(data, &event_file->triggers, list) {
if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
hist_trigger_debug_show(m, data, n++);
}

out_unlock:
mutex_unlock(&event_mutex);

return ret;
return 0;
}

static int event_hist_debug_open(struct inode *inode, struct file *file)
|
@ -49,16 +49,11 @@ static char *last_cmd;

static int errpos(const char *str)
{
int ret = 0;

mutex_lock(&lastcmd_mutex);
guard(mutex)(&lastcmd_mutex);
if (!str || !last_cmd)
goto out;
return 0;

ret = err_pos(last_cmd, str);
out:
mutex_unlock(&lastcmd_mutex);
return ret;
return err_pos(last_cmd, str);
}

static void last_cmd_set(const char *str)
@ -74,14 +69,12 @@ static void last_cmd_set(const char *str)

static void synth_err(u8 err_type, u16 err_pos)
{
mutex_lock(&lastcmd_mutex);
guard(mutex)(&lastcmd_mutex);
if (!last_cmd)
goto out;
return;

tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
err_type, err_pos);
out:
mutex_unlock(&lastcmd_mutex);
}

static int create_synth_event(const char *raw_command);
|
@ -211,12 +211,10 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
if (ret)
return ret;

mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);

if (unlikely(!event_file_file(file))) {
mutex_unlock(&event_mutex);
if (unlikely(!event_file_file(file)))
return -ENODEV;
}

if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC)) {
@ -239,8 +237,6 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
}
}

mutex_unlock(&event_mutex);

return ret;
}

@ -248,7 +244,6 @@ int trigger_process_regex(struct trace_event_file *file, char *buff)
{
char *command, *next;
struct event_command *p;
int ret = -EINVAL;

next = buff = skip_spaces(buff);
command = strsep(&next, ": \t");
@ -259,17 +254,14 @@ int trigger_process_regex(struct trace_event_file *file, char *buff)
}
command = (command[0] != '!') ? command : command + 1;

mutex_lock(&trigger_cmd_mutex);
list_for_each_entry(p, &trigger_commands, list) {
if (strcmp(p->name, command) == 0) {
ret = p->parse(p, file, buff, command, next);
goto out_unlock;
}
}
out_unlock:
mutex_unlock(&trigger_cmd_mutex);
guard(mutex)(&trigger_cmd_mutex);

return ret;
list_for_each_entry(p, &trigger_commands, list) {
if (strcmp(p->name, command) == 0)
return p->parse(p, file, buff, command, next);
}

return -EINVAL;
}

static ssize_t event_trigger_regex_write(struct file *file,
@ -278,7 +270,7 @@ static ssize_t event_trigger_regex_write(struct file *file,
{
struct trace_event_file *event_file;
ssize_t ret;
char *buf;
char *buf __free(kfree) = NULL;

if (!cnt)
return 0;
@ -292,24 +284,18 @@ static ssize_t event_trigger_regex_write(struct file *file,

strim(buf);

mutex_lock(&event_mutex);
event_file = event_file_file(file);
if (unlikely(!event_file)) {
mutex_unlock(&event_mutex);
kfree(buf);
return -ENODEV;
}
ret = trigger_process_regex(event_file, buf);
mutex_unlock(&event_mutex);
guard(mutex)(&event_mutex);

kfree(buf);
event_file = event_file_file(file);
if (unlikely(!event_file))
return -ENODEV;

ret = trigger_process_regex(event_file, buf);
if (ret < 0)
goto out;
return ret;

*ppos += cnt;
ret = cnt;
out:
return ret;
return cnt;
}

static int event_trigger_regex_release(struct inode *inode, struct file *file)
@ -359,20 +345,16 @@ const struct file_operations event_trigger_fops = {
__init int register_event_command(struct event_command *cmd)
{
struct event_command *p;
int ret = 0;

mutex_lock(&trigger_cmd_mutex);
guard(mutex)(&trigger_cmd_mutex);

list_for_each_entry(p, &trigger_commands, list) {
if (strcmp(cmd->name, p->name) == 0) {
ret = -EBUSY;
goto out_unlock;
}
if (strcmp(cmd->name, p->name) == 0)
return -EBUSY;
}
list_add(&cmd->list, &trigger_commands);
out_unlock:
mutex_unlock(&trigger_cmd_mutex);

return ret;
return 0;
}

/*
@ -382,20 +364,17 @@ __init int register_event_command(struct event_command *cmd)
__init int unregister_event_command(struct event_command *cmd)
{
struct event_command *p, *n;
int ret = -ENODEV;

mutex_lock(&trigger_cmd_mutex);
guard(mutex)(&trigger_cmd_mutex);

list_for_each_entry_safe(p, n, &trigger_commands, list) {
if (strcmp(cmd->name, p->name) == 0) {
ret = 0;
list_del_init(&p->list);
goto out_unlock;
return 0;
}
}
out_unlock:
mutex_unlock(&trigger_cmd_mutex);

return ret;
return -ENODEV;
}

/**
|
@ -181,10 +181,9 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
struct trace_array *tr = gops->private;
struct trace_array_cpu *data;
struct fgraph_times *ftimes;
unsigned long flags;
unsigned int trace_ctx;
long disabled;
int ret;
int ret = 0;
int cpu;

if (*task_var & TRACE_GRAPH_NOTRACE)
@ -235,25 +234,21 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
if (tracing_thresh)
return 1;

local_irq_save(flags);
preempt_disable_notrace();
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
trace_ctx = tracing_gen_ctx_flags(flags);
if (unlikely(IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR))) {
disabled = atomic_read(&data->disabled);
if (likely(!disabled)) {
trace_ctx = tracing_gen_ctx();
if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
unsigned long retaddr = ftrace_graph_top_ret_addr(current);

ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
} else
} else {
ret = __trace_graph_entry(tr, trace, trace_ctx);
} else {
ret = 0;
}
}

atomic_dec(&data->disabled);
local_irq_restore(flags);
preempt_enable_notrace();

return ret;
}
@ -320,7 +315,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
struct trace_array *tr = gops->private;
struct trace_array_cpu *data;
struct fgraph_times *ftimes;
unsigned long flags;
unsigned int trace_ctx;
long disabled;
int size;
@ -341,16 +335,15 @@ void trace_graph_return(struct ftrace_graph_ret *trace,

trace->calltime = ftimes->calltime;

local_irq_save(flags);
preempt_disable_notrace();
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
trace_ctx = tracing_gen_ctx_flags(flags);
disabled = atomic_read(&data->disabled);
if (likely(!disabled)) {
trace_ctx = tracing_gen_ctx();
__trace_graph_return(tr, trace, trace_ctx);
}
atomic_dec(&data->disabled);
local_irq_restore(flags);
preempt_enable_notrace();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
|
@ -634,7 +634,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
struct trace_kprobe *old_tk;
int ret;

mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);

old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
trace_probe_group_name(&tk->tp));
@ -642,11 +642,9 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, DIFF_PROBE_TYPE);
ret = -EEXIST;
} else {
ret = append_trace_kprobe(tk, old_tk);
return -EEXIST;
}
goto end;
return append_trace_kprobe(tk, old_tk);
}

/* Register new event */
@ -657,7 +655,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
trace_probe_log_err(0, EVENT_EXIST);
} else
pr_warn("Failed to register probe event(%d)\n", ret);
goto end;
return ret;
}

/* Register k*probe */
@ -672,8 +670,6 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
else
dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));

end:
mutex_unlock(&event_mutex);
return ret;
}

@ -706,7 +702,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
return NOTIFY_DONE;

/* Update probes on coming module */
mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);
for_each_trace_kprobe(tk, pos) {
if (trace_kprobe_within_module(tk, mod)) {
/* Don't need to check busy - this should have gone. */
@ -718,7 +714,6 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
module_name(mod), ret);
}
}
mutex_unlock(&event_mutex);

return NOTIFY_DONE;
}
@ -1970,13 +1965,12 @@ static __init void enable_boot_kprobe_events(void)
struct trace_kprobe *tk;
struct dyn_event *pos;

mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);
for_each_trace_kprobe(tk, pos) {
list_for_each_entry(file, &tr->events, list)
if (file->event_call == trace_probe_event_call(&tk->tp))
trace_event_enable_disable(file, 1, 0);
}
mutex_unlock(&event_mutex);
}

static __init void setup_boot_kprobe_events(void)
|
@ -2083,26 +2083,21 @@ static void osnoise_hotplug_workfn(struct work_struct *dummy)
{
unsigned int cpu = smp_processor_id();

mutex_lock(&trace_types_lock);
guard(mutex)(&trace_types_lock);

if (!osnoise_has_registered_instances())
goto out_unlock_trace;
return;

mutex_lock(&interface_lock);
cpus_read_lock();
guard(mutex)(&interface_lock);
guard(cpus_read_lock)();

if (!cpu_online(cpu))
goto out_unlock;
return;

if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
goto out_unlock;
return;

start_kthread(cpu);

out_unlock:
cpus_read_unlock();
mutex_unlock(&interface_lock);
out_unlock_trace:
mutex_unlock(&trace_types_lock);
}

static DECLARE_WORK(osnoise_hotplug_work, osnoise_hotplug_workfn);
@ -2300,31 +2295,22 @@ static ssize_t
osnoise_cpus_read(struct file *filp, char __user *ubuf, size_t count,
loff_t *ppos)
{
char *mask_str;
char *mask_str __free(kfree) = NULL;
int len;

mutex_lock(&interface_lock);
guard(mutex)(&interface_lock);

len = snprintf(NULL, 0, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)) + 1;
mask_str = kmalloc(len, GFP_KERNEL);
if (!mask_str) {
count = -ENOMEM;
goto out_unlock;
}
if (!mask_str)
return -ENOMEM;

len = snprintf(mask_str, len, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask));
if (len >= count) {
count = -EINVAL;
goto out_free;
}
if (len >= count)
return -EINVAL;

count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_free:
kfree(mask_str);
out_unlock:
mutex_unlock(&interface_lock);

return count;
}

|
@ -520,20 +520,18 @@ stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
int was_enabled;
int ret;

mutex_lock(&stack_sysctl_mutex);
guard(mutex)(&stack_sysctl_mutex);
was_enabled = !!stack_tracer_enabled;

ret = proc_dointvec(table, write, buffer, lenp, ppos);

if (ret || !write || (was_enabled == !!stack_tracer_enabled))
goto out;
return ret;

if (stack_tracer_enabled)
register_ftrace_function(&trace_ops);
else
unregister_ftrace_function(&trace_ops);
out:
mutex_unlock(&stack_sysctl_mutex);
return ret;
}

|
@ -128,7 +128,7 @@ static int stat_seq_init(struct stat_session *session)
int ret = 0;
int i;

mutex_lock(&session->stat_mutex);
guard(mutex)(&session->stat_mutex);
__reset_stat_session(session);

if (!ts->stat_cmp)
@ -136,11 +136,11 @@ static int stat_seq_init(struct stat_session *session)

stat = ts->stat_start(ts);
if (!stat)
goto exit;
return 0;

ret = insert_stat(root, stat, ts->stat_cmp);
if (ret)
goto exit;
return ret;

/*
* Iterate over the tracer stat entries and store them in an rbtree.
@ -157,13 +157,10 @@ static int stat_seq_init(struct stat_session *session)
goto exit_free_rbtree;
}

exit:
mutex_unlock(&session->stat_mutex);
return ret;

exit_free_rbtree:
__reset_stat_session(session);
mutex_unlock(&session->stat_mutex);
return ret;
}

@ -308,7 +305,7 @@ static int init_stat_file(struct stat_session *session)
int register_stat_tracer(struct tracer_stat *trace)
{
struct stat_session *session, *node;
int ret = -EINVAL;
int ret;

if (!trace)
return -EINVAL;
@ -316,18 +313,18 @@ int register_stat_tracer(struct tracer_stat *trace)
if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
return -EINVAL;

guard(mutex)(&all_stat_sessions_mutex);

/* Already registered? */
mutex_lock(&all_stat_sessions_mutex);
list_for_each_entry(node, &all_stat_sessions, session_list) {
if (node->ts == trace)
goto out;
return -EINVAL;
}

ret = -ENOMEM;
/* Init the session */
session = kzalloc(sizeof(*session), GFP_KERNEL);
if (!session)
goto out;
return -ENOMEM;

session->ts = trace;
INIT_LIST_HEAD(&session->session_list);
@ -336,16 +333,13 @@ int register_stat_tracer(struct tracer_stat *trace)
ret = init_stat_file(session);
if (ret) {
destroy_session(session);
goto out;
return ret;
}

ret = 0;
/* Register */
list_add_tail(&session->session_list, &all_stat_sessions);
out:
mutex_unlock(&all_stat_sessions_mutex);

return ret;
return 0;
}

void unregister_stat_tracer(struct tracer_stat *trace)
|
@ -498,11 +498,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
struct trace_uprobe *old_tu;
int ret;

mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);

ret = validate_ref_ctr_offset(tu);
if (ret)
goto end;
return ret;

/* register as an event */
old_tu = find_probe_event(trace_probe_name(&tu->tp),
@ -511,11 +511,9 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, DIFF_PROBE_TYPE);
ret = -EEXIST;
} else {
ret = append_trace_uprobe(tu, old_tu);
return -EEXIST;
}
goto end;
return append_trace_uprobe(tu, old_tu);
}

ret = register_uprobe_event(tu);
@ -525,14 +523,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
trace_probe_log_err(0, EVENT_EXIST);
} else
pr_warn("Failed to register probe event(%d)\n", ret);
goto end;
return ret;
}

dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));

end:
mutex_unlock(&event_mutex);

return ret;
}

|
@ -64,14 +64,141 @@
|
||||
#define EM_LOONGARCH 258
|
||||
#endif
|
||||
|
||||
typedef union {
|
||||
Elf32_Ehdr e32;
|
||||
Elf64_Ehdr e64;
|
||||
} Elf_Ehdr;
|
||||
|
||||
typedef union {
|
||||
Elf32_Shdr e32;
|
||||
Elf64_Shdr e64;
|
||||
} Elf_Shdr;
|
||||
|
||||
typedef union {
|
||||
Elf32_Sym e32;
|
||||
Elf64_Sym e64;
|
||||
} Elf_Sym;
|
||||
|
||||
static uint32_t (*r)(const uint32_t *);
|
||||
static uint16_t (*r2)(const uint16_t *);
|
||||
static uint64_t (*r8)(const uint64_t *);
|
||||
static void (*w)(uint32_t, uint32_t *);
|
||||
static void (*w2)(uint16_t, uint16_t *);
|
||||
static void (*w8)(uint64_t, uint64_t *);
|
||||
typedef void (*table_sort_t)(char *, int);
|
||||
|
||||
static uint64_t ehdr64_shoff(Elf_Ehdr *ehdr)
|
||||
{
|
||||
return r8(&ehdr->e64.e_shoff);
|
||||
}
|
||||
|
||||
static uint64_t ehdr32_shoff(Elf_Ehdr *ehdr)
|
||||
{
|
||||
return r(&ehdr->e32.e_shoff);
|
||||
}
|
||||
|
||||
#define EHDR_HALF(fn_name) \
|
||||
static uint16_t ehdr64_##fn_name(Elf_Ehdr *ehdr) \
|
||||
{ \
|
||||
return r2(&ehdr->e64.e_##fn_name); \
|
||||
} \
|
||||
\
|
||||
static uint16_t ehdr32_##fn_name(Elf_Ehdr *ehdr) \
|
||||
{ \
|
||||
return r2(&ehdr->e32.e_##fn_name); \
|
||||
}
|
||||
|
||||
EHDR_HALF(shentsize)
|
||||
EHDR_HALF(shstrndx)
|
||||
EHDR_HALF(shnum)
|
||||
|
||||
#define SHDR_WORD(fn_name) \
|
||||
static uint32_t shdr64_##fn_name(Elf_Shdr *shdr) \
|
||||
{ \
|
||||
return r(&shdr->e64.sh_##fn_name); \
|
||||
} \
|
||||
\
|
||||
static uint32_t shdr32_##fn_name(Elf_Shdr *shdr) \
|
||||
{ \
|
||||
return r(&shdr->e32.sh_##fn_name); \
|
||||
}
|
||||
|
||||
#define SHDR_ADDR(fn_name) \
|
||||
static uint64_t shdr64_##fn_name(Elf_Shdr *shdr) \
|
||||
{ \
|
||||
return r8(&shdr->e64.sh_##fn_name); \
|
||||
} \
|
||||
\
|
||||
static uint64_t shdr32_##fn_name(Elf_Shdr *shdr) \
|
||||
{ \
|
||||
return r(&shdr->e32.sh_##fn_name); \
|
||||
}
|
||||
|
||||
#define SHDR_WORD(fn_name) \
|
||||
static uint32_t shdr64_##fn_name(Elf_Shdr *shdr) \
|
||||
{ \
|
||||
return r(&shdr->e64.sh_##fn_name); \
|
||||
} \
|
||||
\
|
||||
static uint32_t shdr32_##fn_name(Elf_Shdr *shdr) \
|
||||
{ \
|
||||
return r(&shdr->e32.sh_##fn_name); \
|
||||
}
|
||||
|
||||
SHDR_ADDR(addr)
|
||||
SHDR_ADDR(offset)
|
||||
SHDR_ADDR(size)
|
||||
SHDR_ADDR(entsize)
|
||||
|
||||
SHDR_WORD(link)
|
||||
SHDR_WORD(name)
|
||||
SHDR_WORD(type)
|
||||
|
||||
#define SYM_ADDR(fn_name) \
|
||||
static uint64_t sym64_##fn_name(Elf_Sym *sym) \
|
||||
{ \
|
||||
return r8(&sym->e64.st_##fn_name); \
|
||||
} \
|
||||
\
|
||||
static uint64_t sym32_##fn_name(Elf_Sym *sym) \
|
||||
{ \
|
||||
return r(&sym->e32.st_##fn_name); \
|
||||
}
|
||||
|
||||
#define SYM_WORD(fn_name) \
|
||||
static uint32_t sym64_##fn_name(Elf_Sym *sym) \
|
||||
{ \
|
||||
return r(&sym->e64.st_##fn_name); \
|
||||
} \
|
||||
\
|
||||
static uint32_t sym32_##fn_name(Elf_Sym *sym) \
|
||||
{ \
|
||||
return r(&sym->e32.st_##fn_name); \
|
||||
}
|
||||
|
||||
#define SYM_HALF(fn_name) \
|
||||
static uint16_t sym64_##fn_name(Elf_Sym *sym) \
|
||||
{ \
|
||||
return r2(&sym->e64.st_##fn_name); \
|
||||
} \
|
||||
\
|
||||
static uint16_t sym32_##fn_name(Elf_Sym *sym) \
|
||||
{ \
|
||||
return r2(&sym->e32.st_##fn_name); \
|
||||
}
|
||||
|
||||
static uint8_t sym64_type(Elf_Sym *sym)
|
||||
{
|
||||
return ELF64_ST_TYPE(sym->e64.st_info);
|
||||
}
|
||||
|
||||
static uint8_t sym32_type(Elf_Sym *sym)
|
||||
{
|
||||
return ELF32_ST_TYPE(sym->e32.st_info);
|
||||
}
|
||||
|
||||
SYM_ADDR(value)
|
||||
SYM_WORD(name)
|
||||
SYM_HALF(shndx)
|
||||
|
||||
/*
|
||||
* Get the whole file as a programming convenience in order to avoid
|
||||
* malloc+lseek+read+free of many pieces. If successful, then mmap
|
||||
@ -146,31 +273,11 @@ static void wbe(uint32_t val, uint32_t *x)
|
||||
put_unaligned_be32(val, x);
|
||||
}
|
||||
|
||||
static void w2be(uint16_t val, uint16_t *x)
|
||||
{
|
||||
put_unaligned_be16(val, x);
|
||||
}
|
||||
|
||||
static void w8be(uint64_t val, uint64_t *x)
|
||||
{
|
||||
put_unaligned_be64(val, x);
|
||||
}
|
||||
|
||||
static void wle(uint32_t val, uint32_t *x)
|
||||
{
|
||||
put_unaligned_le32(val, x);
|
||||
}
|
||||
|
||||
static void w2le(uint16_t val, uint16_t *x)
|
||||
{
|
||||
put_unaligned_le16(val, x);
|
||||
}
|
||||
|
||||
static void w8le(uint64_t val, uint64_t *x)
|
||||
{
|
||||
put_unaligned_le64(val, x);
|
||||
}
|
||||
|
||||
/*
|
||||
* Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
|
||||
* the way to -256..-1, to avoid conflicting with real section
|
||||
@ -195,10 +302,443 @@ static inline unsigned int get_secindex(unsigned int shndx,
|
||||
return r(&symtab_shndx_start[sym_offs]);
|
||||
}
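get_secindex() handles the SHN_XINDEX escape: a symbol's 16-bit st_shndx field cannot hold large section indices, so the real index is stored in a parallel SHT_SYMTAB_SHNDX table keyed by symbol number. A minimal standalone sketch of that lookup (helper name and table contents are hypothetical):

/* Sketch of the SHN_XINDEX escape for large section indices. */
#include <elf.h>
#include <stdio.h>

static unsigned int real_shndx(unsigned int st_shndx, unsigned int symnum,
			       const Elf32_Word *shndx_table)
{
	if (st_shndx != SHN_XINDEX)
		return st_shndx;
	return shndx_table[symnum];	/* full 32-bit index lives here */
}

int main(void)
{
	Elf32_Word table[] = { 0, 70000 };	/* hypothetical parallel table */

	printf("%u\n", real_shndx(SHN_XINDEX, 1, table));	/* 70000 */
	printf("%u\n", real_shndx(5, 1, table));		/* 5 */
	return 0;
}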
|
||||
|
||||
/* 32 bit and 64 bit are very similar */
|
||||
#include "sorttable.h"
|
||||
#define SORTTABLE_64
|
||||
#include "sorttable.h"
|
||||
static int compare_extable_32(const void *a, const void *b)
|
||||
{
|
||||
Elf32_Addr av = r(a);
|
||||
Elf32_Addr bv = r(b);
|
||||
|
||||
if (av < bv)
|
||||
return -1;
|
||||
return av > bv;
|
||||
}
|
||||
|
||||
static int compare_extable_64(const void *a, const void *b)
|
||||
{
|
||||
Elf64_Addr av = r8(a);
|
||||
Elf64_Addr bv = r8(b);
|
||||
|
||||
if (av < bv)
|
||||
return -1;
|
||||
return av > bv;
|
||||
}
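Both comparators return the sign via comparisons rather than subtraction: av - bv can wrap around, and the result would not reliably fit the int that qsort(3) expects. The same overflow-safe pattern as a standalone program:

/* Standalone sketch of the overflow-safe comparator pattern used above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int cmp_u64(const void *a, const void *b)
{
	uint64_t av = *(const uint64_t *)a;
	uint64_t bv = *(const uint64_t *)b;

	if (av < bv)
		return -1;
	return av > bv;		/* 1 if greater, 0 if equal */
}

int main(void)
{
	uint64_t v[] = { 3, 0xffffffffffffffffULL, 1 };

	qsort(v, 3, sizeof(v[0]), cmp_u64);
	printf("%llu %llu %llu\n",
	       (unsigned long long)v[0], (unsigned long long)v[1],
	       (unsigned long long)v[2]);
	return 0;
}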
|
||||
|
||||
static inline void *get_index(void *start, int entsize, int index)
|
||||
{
|
||||
return start + (entsize * index);
|
||||
}
|
||||
|
||||
|
||||
static int (*compare_extable)(const void *a, const void *b);
|
||||
static uint64_t (*ehdr_shoff)(Elf_Ehdr *ehdr);
|
||||
static uint16_t (*ehdr_shstrndx)(Elf_Ehdr *ehdr);
|
||||
static uint16_t (*ehdr_shentsize)(Elf_Ehdr *ehdr);
|
||||
static uint16_t (*ehdr_shnum)(Elf_Ehdr *ehdr);
|
||||
static uint64_t (*shdr_addr)(Elf_Shdr *shdr);
|
||||
static uint64_t (*shdr_offset)(Elf_Shdr *shdr);
|
||||
static uint64_t (*shdr_size)(Elf_Shdr *shdr);
|
||||
static uint64_t (*shdr_entsize)(Elf_Shdr *shdr);
|
||||
static uint32_t (*shdr_link)(Elf_Shdr *shdr);
|
||||
static uint32_t (*shdr_name)(Elf_Shdr *shdr);
|
||||
static uint32_t (*shdr_type)(Elf_Shdr *shdr);
|
||||
static uint8_t (*sym_type)(Elf_Sym *sym);
|
||||
static uint32_t (*sym_name)(Elf_Sym *sym);
|
||||
static uint64_t (*sym_value)(Elf_Sym *sym);
|
||||
static uint16_t (*sym_shndx)(Elf_Sym *sym);
|
||||
|
||||
static int extable_ent_size;
|
||||
static int long_size;
|
||||
|
||||
|
||||
#ifdef UNWINDER_ORC_ENABLED
|
||||
/* The ORC unwinder is only supported on x86_64 */
|
||||
#include <asm/orc_types.h>
|
||||
|
||||
#define ERRSTR_MAXSZ 256
|
||||
|
||||
static char g_err[ERRSTR_MAXSZ];
|
||||
static int *g_orc_ip_table;
|
||||
static struct orc_entry *g_orc_table;
|
||||
|
||||
static pthread_t orc_sort_thread;
|
||||
|
||||
static inline unsigned long orc_ip(const int *ip)
|
||||
{
|
||||
return (unsigned long)ip + *ip;
|
||||
}
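Each .orc_unwind_ip entry is self-relative: the stored 32-bit value is the target's offset from the entry's own address, which orc_ip() adds back. A standalone illustration of the scheme with synthetic values:

/* Standalone illustration of self-relative (PC-relative) table entries. */
#include <stdio.h>

static unsigned long decode(const int *ip)
{
	return (unsigned long)ip + *ip;	/* entry address + stored offset */
}

int main(void)
{
	static int table[16];
	int *entry = &table[2];
	int *target = &table[10];

	*entry = (int)((char *)target - (char *)entry);	/* store relative */
	printf("decoded == target: %d\n",
	       decode(entry) == (unsigned long)target);
	return 0;
}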
|
||||
|
||||
static int orc_sort_cmp(const void *_a, const void *_b)
|
||||
{
|
||||
struct orc_entry *orc_a, *orc_b;
|
||||
const int *a = g_orc_ip_table + *(int *)_a;
|
||||
const int *b = g_orc_ip_table + *(int *)_b;
|
||||
unsigned long a_val = orc_ip(a);
|
||||
unsigned long b_val = orc_ip(b);
|
||||
|
||||
if (a_val > b_val)
|
||||
return 1;
|
||||
if (a_val < b_val)
|
||||
return -1;
|
||||
|
||||
/*
|
||||
* The "weak" section terminator entries need to always be on the left
|
||||
* to ensure the lookup code skips them in favor of real entries.
|
||||
* These terminator entries exist to handle any gaps created by
|
||||
* whitelisted .o files which didn't get objtool generation.
|
||||
*/
|
||||
orc_a = g_orc_table + (a - g_orc_ip_table);
|
||||
orc_b = g_orc_table + (b - g_orc_ip_table);
|
||||
if (orc_a->type == ORC_TYPE_UNDEFINED && orc_b->type == ORC_TYPE_UNDEFINED)
|
||||
return 0;
|
||||
return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
|
||||
}
|
||||
|
||||
static void *sort_orctable(void *arg)
|
||||
{
|
||||
int i;
|
||||
int *idxs = NULL;
|
||||
int *tmp_orc_ip_table = NULL;
|
||||
struct orc_entry *tmp_orc_table = NULL;
|
||||
unsigned int *orc_ip_size = (unsigned int *)arg;
|
||||
unsigned int num_entries = *orc_ip_size / sizeof(int);
|
||||
unsigned int orc_size = num_entries * sizeof(struct orc_entry);
|
||||
|
||||
idxs = (int *)malloc(*orc_ip_size);
|
||||
if (!idxs) {
|
||||
snprintf(g_err, ERRSTR_MAXSZ, "malloc idxs: %s",
|
||||
strerror(errno));
|
||||
pthread_exit(g_err);
|
||||
}
|
||||
|
||||
tmp_orc_ip_table = (int *)malloc(*orc_ip_size);
|
||||
if (!tmp_orc_ip_table) {
|
||||
snprintf(g_err, ERRSTR_MAXSZ, "malloc tmp_orc_ip_table: %s",
|
||||
strerror(errno));
|
||||
pthread_exit(g_err);
|
||||
}
|
||||
|
||||
tmp_orc_table = (struct orc_entry *)malloc(orc_size);
|
||||
if (!tmp_orc_table) {
|
||||
snprintf(g_err, ERRSTR_MAXSZ, "malloc tmp_orc_table: %s",
|
||||
strerror(errno));
|
||||
pthread_exit(g_err);
|
||||
}
|
||||
|
||||
/* initialize indices array, convert ip_table to absolute address */
|
||||
for (i = 0; i < num_entries; i++) {
|
||||
idxs[i] = i;
|
||||
tmp_orc_ip_table[i] = g_orc_ip_table[i] + i * sizeof(int);
|
||||
}
|
||||
memcpy(tmp_orc_table, g_orc_table, orc_size);
|
||||
|
||||
qsort(idxs, num_entries, sizeof(int), orc_sort_cmp);
|
||||
|
||||
for (i = 0; i < num_entries; i++) {
|
||||
if (idxs[i] == i)
|
||||
continue;
|
||||
|
||||
/* convert back to relative address */
|
||||
g_orc_ip_table[i] = tmp_orc_ip_table[idxs[i]] - i * sizeof(int);
|
||||
g_orc_table[i] = tmp_orc_table[idxs[i]];
|
||||
}
|
||||
|
||||
free(idxs);
|
||||
free(tmp_orc_ip_table);
|
||||
free(tmp_orc_table);
|
||||
pthread_exit(NULL);
|
||||
}
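Rather than sorting the two parallel ORC tables directly, sort_orctable() sorts an array of indices and then applies the resulting permutation to both tables, keeping them in sync. The same argsort-then-permute idea in miniature (all names illustrative):

/* Minimal argsort-then-permute sketch for two parallel arrays. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const int *g_keys;

static int by_key(const void *a, const void *b)
{
	int ka = g_keys[*(const int *)a], kb = g_keys[*(const int *)b];

	return (ka > kb) - (ka < kb);
}

int main(void)
{
	int keys[] = { 30, 10, 20 };
	char vals[] = { 'c', 'a', 'b' };
	int idx[] = { 0, 1, 2 }, tmp_k[3];
	char tmp_v[3];

	g_keys = keys;
	qsort(idx, 3, sizeof(int), by_key);

	memcpy(tmp_k, keys, sizeof(keys));
	memcpy(tmp_v, vals, sizeof(vals));
	for (int i = 0; i < 3; i++) {		/* apply the permutation */
		keys[i] = tmp_k[idx[i]];
		vals[i] = tmp_v[idx[i]];
	}
	printf("%d%c %d%c %d%c\n", keys[0], vals[0], keys[1], vals[1],
	       keys[2], vals[2]);
	return 0;
}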
|
||||
#endif
|
||||
|
||||
#ifdef MCOUNT_SORT_ENABLED
|
||||
static pthread_t mcount_sort_thread;
|
||||
|
||||
struct elf_mcount_loc {
|
||||
Elf_Ehdr *ehdr;
|
||||
Elf_Shdr *init_data_sec;
|
||||
uint64_t start_mcount_loc;
|
||||
uint64_t stop_mcount_loc;
|
||||
};
|
||||
|
||||
/* Sort the addresses stored between __start_mcount_loc and __stop_mcount_loc in vmlinux */
|
||||
static void *sort_mcount_loc(void *arg)
|
||||
{
|
||||
struct elf_mcount_loc *emloc = (struct elf_mcount_loc *)arg;
|
||||
uint64_t offset = emloc->start_mcount_loc - shdr_addr(emloc->init_data_sec)
|
||||
+ shdr_offset(emloc->init_data_sec);
|
||||
uint64_t count = emloc->stop_mcount_loc - emloc->start_mcount_loc;
|
||||
unsigned char *start_loc = (void *)emloc->ehdr + offset;
|
||||
|
||||
qsort(start_loc, count/long_size, long_size, compare_extable);
|
||||
return NULL;
|
||||
}
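The offset computation above translates the start symbol's virtual address into a file offset through its containing section: file_offset = vaddr - sh_addr + sh_offset. A worked standalone example with made-up section values (all three constants are hypothetical):

/* Worked example of the vaddr -> file offset translation. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t sh_addr   = 0xffffffff82000000ULL;	/* section vaddr */
	uint64_t sh_offset = 0x01200000ULL;		/* section file offset */
	uint64_t start_mcount_loc = 0xffffffff82345678ULL;

	uint64_t file_off = start_mcount_loc - sh_addr + sh_offset;

	printf("file offset: 0x%llx\n", (unsigned long long)file_off);
	return 0;
}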
|
||||
|
||||
/* Find the addresses of __start_mcount_loc and __stop_mcount_loc in the symbol table */
|
||||
static void get_mcount_loc(struct elf_mcount_loc *emloc, Elf_Shdr *symtab_sec,
|
||||
const char *strtab)
|
||||
{
|
||||
Elf_Sym *sym, *end_sym;
|
||||
int symentsize = shdr_entsize(symtab_sec);
|
||||
int found = 0;
|
||||
|
||||
sym = (void *)emloc->ehdr + shdr_offset(symtab_sec);
|
||||
end_sym = (void *)sym + shdr_size(symtab_sec);
|
||||
|
||||
while (sym < end_sym) {
|
||||
if (!strcmp(strtab + sym_name(sym), "__start_mcount_loc")) {
|
||||
emloc->start_mcount_loc = sym_value(sym);
|
||||
if (++found == 2)
|
||||
break;
|
||||
} else if (!strcmp(strtab + sym_name(sym), "__stop_mcount_loc")) {
|
||||
emloc->stop_mcount_loc = sym_value(sym);
|
||||
if (++found == 2)
|
||||
break;
|
||||
}
|
||||
sym = (void *)sym + symentsize;
|
||||
}
|
||||
|
||||
if (!emloc->start_mcount_loc) {
|
||||
fprintf(stderr, "get start_mcount_loc error!");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!emloc->stop_mcount_loc) {
|
||||
fprintf(stderr, "get stop_mcount_loc error!");
|
||||
return;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static int do_sort(Elf_Ehdr *ehdr,
|
||||
char const *const fname,
|
||||
table_sort_t custom_sort)
|
||||
{
|
||||
int rc = -1;
|
||||
Elf_Shdr *shdr_start;
|
||||
Elf_Shdr *strtab_sec = NULL;
|
||||
Elf_Shdr *symtab_sec = NULL;
|
||||
Elf_Shdr *extab_sec = NULL;
|
||||
Elf_Shdr *string_sec;
|
||||
Elf_Sym *sym;
|
||||
const Elf_Sym *symtab;
|
||||
Elf32_Word *symtab_shndx = NULL;
|
||||
Elf_Sym *sort_needed_sym = NULL;
|
||||
Elf_Shdr *sort_needed_sec;
|
||||
uint32_t *sort_needed_loc;
|
||||
void *sym_start;
|
||||
void *sym_end;
|
||||
const char *secstrings;
|
||||
const char *strtab;
|
||||
char *extab_image;
|
||||
int sort_need_index;
|
||||
int symentsize;
|
||||
int shentsize;
|
||||
int idx;
|
||||
int i;
|
||||
unsigned int shnum;
|
||||
unsigned int shstrndx;
|
||||
#ifdef MCOUNT_SORT_ENABLED
|
||||
struct elf_mcount_loc mstruct = {0};
|
||||
#endif
|
||||
#ifdef UNWINDER_ORC_ENABLED
|
||||
unsigned int orc_ip_size = 0;
|
||||
unsigned int orc_size = 0;
|
||||
unsigned int orc_num_entries = 0;
|
||||
#endif
|
||||
|
||||
shdr_start = (Elf_Shdr *)((char *)ehdr + ehdr_shoff(ehdr));
|
||||
shentsize = ehdr_shentsize(ehdr);
|
||||
|
||||
shstrndx = ehdr_shstrndx(ehdr);
|
||||
if (shstrndx == SHN_XINDEX)
|
||||
shstrndx = shdr_link(shdr_start);
|
||||
string_sec = get_index(shdr_start, shentsize, shstrndx);
|
||||
secstrings = (const char *)ehdr + shdr_offset(string_sec);
|
||||
|
||||
shnum = ehdr_shnum(ehdr);
|
||||
if (shnum == SHN_UNDEF)
|
||||
shnum = shdr_size(shdr_start);
|
||||
|
||||
for (i = 0; i < shnum; i++) {
|
||||
Elf_Shdr *shdr = get_index(shdr_start, shentsize, i);
|
||||
|
||||
idx = shdr_name(shdr);
|
||||
if (!strcmp(secstrings + idx, "__ex_table"))
|
||||
extab_sec = shdr;
|
||||
if (!strcmp(secstrings + idx, ".symtab"))
|
||||
symtab_sec = shdr;
|
||||
if (!strcmp(secstrings + idx, ".strtab"))
|
||||
strtab_sec = shdr;
|
||||
|
||||
if (shdr_type(shdr) == SHT_SYMTAB_SHNDX)
|
||||
symtab_shndx = (Elf32_Word *)((const char *)ehdr +
|
||||
shdr_offset(shdr));
|
||||
|
||||
#ifdef MCOUNT_SORT_ENABLED
|
||||
/* locate the .init.data section in vmlinux */
|
||||
if (!strcmp(secstrings + idx, ".init.data"))
|
||||
mstruct.init_data_sec = shdr;
|
||||
#endif
|
||||
|
||||
#ifdef UNWINDER_ORC_ENABLED
|
||||
/* locate the ORC unwind tables */
|
||||
if (!strcmp(secstrings + idx, ".orc_unwind_ip")) {
|
||||
orc_ip_size = shdr_size(shdr);
|
||||
g_orc_ip_table = (int *)((void *)ehdr +
|
||||
shdr_offset(shdr));
|
||||
}
|
||||
if (!strcmp(secstrings + idx, ".orc_unwind")) {
|
||||
orc_size = shdr_size(shdr);
|
||||
g_orc_table = (struct orc_entry *)((void *)ehdr +
|
||||
shdr_offset(shdr));
|
||||
}
|
||||
#endif
|
||||
} /* for loop */
|
||||
|
||||
#ifdef UNWINDER_ORC_ENABLED
|
||||
if (!g_orc_ip_table || !g_orc_table) {
|
||||
fprintf(stderr,
|
||||
"incomplete ORC unwind tables in file: %s\n", fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
orc_num_entries = orc_ip_size / sizeof(int);
|
||||
if (orc_ip_size % sizeof(int) != 0 ||
|
||||
orc_size % sizeof(struct orc_entry) != 0 ||
|
||||
orc_num_entries != orc_size / sizeof(struct orc_entry)) {
|
||||
fprintf(stderr,
|
||||
"inconsistent ORC unwind table entries in file: %s\n",
|
||||
fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* create thread to sort ORC unwind tables concurrently */
|
||||
if (pthread_create(&orc_sort_thread, NULL,
|
||||
sort_orctable, &orc_ip_size)) {
|
||||
fprintf(stderr,
|
||||
"pthread_create orc_sort_thread failed '%s': %s\n",
|
||||
strerror(errno), fname);
|
||||
goto out;
|
||||
}
|
||||
#endif
|
||||
if (!extab_sec) {
|
||||
fprintf(stderr, "no __ex_table in file: %s\n", fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!symtab_sec) {
|
||||
fprintf(stderr, "no .symtab in file: %s\n", fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!strtab_sec) {
|
||||
fprintf(stderr, "no .strtab in file: %s\n", fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
extab_image = (void *)ehdr + shdr_offset(extab_sec);
|
||||
strtab = (const char *)ehdr + shdr_offset(strtab_sec);
|
||||
symtab = (const Elf_Sym *)((const char *)ehdr + shdr_offset(symtab_sec));
|
||||
|
||||
#ifdef MCOUNT_SORT_ENABLED
|
||||
mstruct.ehdr = ehdr;
|
||||
get_mcount_loc(&mstruct, symtab_sec, strtab);
|
||||
|
||||
if (!mstruct.init_data_sec || !mstruct.start_mcount_loc || !mstruct.stop_mcount_loc) {
|
||||
fprintf(stderr,
|
||||
"incomplete mcount's sort in file: %s\n",
|
||||
fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* create thread to sort mcount_loc concurrently */
|
||||
if (pthread_create(&mcount_sort_thread, NULL, &sort_mcount_loc, &mstruct)) {
|
||||
fprintf(stderr,
|
||||
"pthread_create mcount_sort_thread failed '%s': %s\n",
|
||||
strerror(errno), fname);
|
||||
goto out;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (custom_sort) {
|
||||
custom_sort(extab_image, shdr_size(extab_sec));
|
||||
} else {
|
||||
int num_entries = shdr_size(extab_sec) / extable_ent_size;
|
||||
qsort(extab_image, num_entries,
|
||||
extable_ent_size, compare_extable);
|
||||
}
|
||||
|
||||
/* find the flag main_extable_sort_needed */
|
||||
sym_start = (void *)ehdr + shdr_offset(symtab_sec);
|
||||
sym_end = sym_start + shdr_size(symtab_sec);
|
||||
symentsize = shdr_entsize(symtab_sec);
|
||||
|
||||
for (sym = sym_start; (void *)sym + symentsize < sym_end;
|
||||
sym = (void *)sym + symentsize) {
|
||||
if (sym_type(sym) != STT_OBJECT)
|
||||
continue;
|
||||
if (!strcmp(strtab + sym_name(sym),
|
||||
"main_extable_sort_needed")) {
|
||||
sort_needed_sym = sym;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!sort_needed_sym) {
|
||||
fprintf(stderr,
|
||||
"no main_extable_sort_needed symbol in file: %s\n",
|
||||
fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
sort_need_index = get_secindex(sym_shndx(sym),
|
||||
((void *)sort_needed_sym - (void *)symtab) / symentsize,
|
||||
symtab_shndx);
|
||||
sort_needed_sec = get_index(shdr_start, shentsize, sort_need_index);
|
||||
sort_needed_loc = (void *)ehdr +
|
||||
shdr_offset(sort_needed_sec) +
|
||||
sym_value(sort_needed_sym) - shdr_addr(sort_needed_sec);
|
||||
|
||||
/* extable has been sorted, clear the flag */
|
||||
w(0, sort_needed_loc);
|
||||
rc = 0;
|
||||
|
||||
out:
|
||||
#ifdef UNWINDER_ORC_ENABLED
|
||||
if (orc_sort_thread) {
|
||||
void *retval = NULL;
|
||||
/* wait for the ORC table sort to finish */
|
||||
rc = pthread_join(orc_sort_thread, &retval);
|
||||
if (rc) {
|
||||
fprintf(stderr,
|
||||
"pthread_join failed '%s': %s\n",
|
||||
strerror(errno), fname);
|
||||
} else if (retval) {
|
||||
rc = -1;
|
||||
fprintf(stderr,
|
||||
"failed to sort ORC tables '%s': %s\n",
|
||||
(char *)retval, fname);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef MCOUNT_SORT_ENABLED
|
||||
if (mcount_sort_thread) {
|
||||
void *retval = NULL;
|
||||
/* wait for the mcount sort to finish */
|
||||
rc = pthread_join(mcount_sort_thread, &retval);
|
||||
if (rc) {
|
||||
fprintf(stderr,
|
||||
"pthread_join failed '%s': %s\n",
|
||||
strerror(errno), fname);
|
||||
} else if (retval) {
|
||||
rc = -1;
|
||||
fprintf(stderr,
|
||||
"failed to sort mcount '%s': %s\n",
|
||||
(char *)retval, fname);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int compare_relative_table(const void *a, const void *b)
|
||||
{
|
||||
@ -267,41 +807,36 @@ static void sort_relative_table_with_data(char *extab_image, int image_size)
|
||||
|
||||
static int do_file(char const *const fname, void *addr)
|
||||
{
|
||||
int rc = -1;
|
||||
Elf32_Ehdr *ehdr = addr;
|
||||
Elf_Ehdr *ehdr = addr;
|
||||
table_sort_t custom_sort = NULL;
|
||||
|
||||
switch (ehdr->e_ident[EI_DATA]) {
|
||||
switch (ehdr->e32.e_ident[EI_DATA]) {
|
||||
case ELFDATA2LSB:
|
||||
r = rle;
|
||||
r2 = r2le;
|
||||
r8 = r8le;
|
||||
w = wle;
|
||||
w2 = w2le;
|
||||
w8 = w8le;
|
||||
break;
|
||||
case ELFDATA2MSB:
|
||||
r = rbe;
|
||||
r2 = r2be;
|
||||
r8 = r8be;
|
||||
w = wbe;
|
||||
w2 = w2be;
|
||||
w8 = w8be;
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
|
||||
ehdr->e_ident[EI_DATA], fname);
|
||||
ehdr->e32.e_ident[EI_DATA], fname);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
|
||||
(r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN) ||
|
||||
ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
|
||||
if (memcmp(ELFMAG, ehdr->e32.e_ident, SELFMAG) != 0 ||
|
||||
(r2(&ehdr->e32.e_type) != ET_EXEC && r2(&ehdr->e32.e_type) != ET_DYN) ||
|
||||
ehdr->e32.e_ident[EI_VERSION] != EV_CURRENT) {
|
||||
fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
|
||||
return -1;
|
||||
}
|
||||
|
||||
switch (r2(&ehdr->e_machine)) {
|
||||
switch (r2(&ehdr->e32.e_machine)) {
|
||||
case EM_386:
|
||||
case EM_AARCH64:
|
||||
case EM_LOONGARCH:
|
||||
@ -324,40 +859,74 @@ static int do_file(char const *const fname, void *addr)
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "unrecognized e_machine %d %s\n",
|
||||
r2(&ehdr->e_machine), fname);
|
||||
r2(&ehdr->e32.e_machine), fname);
|
||||
return -1;
|
||||
}
|
||||
|
||||
switch (ehdr->e_ident[EI_CLASS]) {
|
||||
switch (ehdr->e32.e_ident[EI_CLASS]) {
|
||||
case ELFCLASS32:
|
||||
if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr) ||
|
||||
r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
|
||||
if (r2(&ehdr->e32.e_ehsize) != sizeof(Elf32_Ehdr) ||
|
||||
r2(&ehdr->e32.e_shentsize) != sizeof(Elf32_Shdr)) {
|
||||
fprintf(stderr,
|
||||
"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
|
||||
break;
|
||||
return -1;
|
||||
}
|
||||
rc = do_sort_32(ehdr, fname, custom_sort);
|
||||
|
||||
compare_extable = compare_extable_32;
|
||||
ehdr_shoff = ehdr32_shoff;
|
||||
ehdr_shentsize = ehdr32_shentsize;
|
||||
ehdr_shstrndx = ehdr32_shstrndx;
|
||||
ehdr_shnum = ehdr32_shnum;
|
||||
shdr_addr = shdr32_addr;
|
||||
shdr_offset = shdr32_offset;
|
||||
shdr_link = shdr32_link;
|
||||
shdr_size = shdr32_size;
|
||||
shdr_name = shdr32_name;
|
||||
shdr_type = shdr32_type;
|
||||
shdr_entsize = shdr32_entsize;
|
||||
sym_type = sym32_type;
|
||||
sym_name = sym32_name;
|
||||
sym_value = sym32_value;
|
||||
sym_shndx = sym32_shndx;
|
||||
long_size = 4;
|
||||
extable_ent_size = 8;
|
||||
break;
|
||||
case ELFCLASS64:
|
||||
{
|
||||
Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
|
||||
if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr) ||
|
||||
r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
|
||||
if (r2(&ehdr->e64.e_ehsize) != sizeof(Elf64_Ehdr) ||
|
||||
r2(&ehdr->e64.e_shentsize) != sizeof(Elf64_Shdr)) {
|
||||
fprintf(stderr,
|
||||
"unrecognized ET_EXEC/ET_DYN file: %s\n",
|
||||
fname);
|
||||
break;
|
||||
}
|
||||
rc = do_sort_64(ghdr, fname, custom_sort);
|
||||
return -1;
|
||||
}
|
||||
|
||||
compare_extable = compare_extable_64;
|
||||
ehdr_shoff = ehdr64_shoff;
|
||||
ehdr_shentsize = ehdr64_shentsize;
|
||||
ehdr_shstrndx = ehdr64_shstrndx;
|
||||
ehdr_shnum = ehdr64_shnum;
|
||||
shdr_addr = shdr64_addr;
|
||||
shdr_offset = shdr64_offset;
|
||||
shdr_link = shdr64_link;
|
||||
shdr_size = shdr64_size;
|
||||
shdr_name = shdr64_name;
|
||||
shdr_type = shdr64_type;
|
||||
shdr_entsize = shdr64_entsize;
|
||||
sym_type = sym64_type;
|
||||
sym_name = sym64_name;
|
||||
sym_value = sym64_value;
|
||||
sym_shndx = sym64_shndx;
|
||||
long_size = 8;
|
||||
extable_ent_size = 16;
|
||||
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "unrecognized ELF class %d %s\n",
|
||||
ehdr->e_ident[EI_CLASS], fname);
|
||||
break;
|
||||
ehdr->e32.e_ident[EI_CLASS], fname);
|
||||
return -1;
|
||||
}
|
||||
|
||||
return rc;
|
||||
return do_sort(ehdr, fname, custom_sort);
|
||||
}
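The rewrite replaces the compile-time template pair (do_sort_32/do_sort_64) with run-time binding: one ELF-class switch points every accessor function pointer at the right 32- or 64-bit helper, and a single do_sort() runs against them. A minimal standalone sketch of that dispatch style (names are illustrative):

/* Minimal sketch of run-time binding of accessor function pointers. */
#include <stdint.h>
#include <stdio.h>

static uint64_t (*get_val)(const void *p);

static uint64_t get32(const void *p) { return *(const uint32_t *)p; }
static uint64_t get64(const void *p) { return *(const uint64_t *)p; }

static void bind(int is64)
{
	get_val = is64 ? get64 : get32;	/* bind once, use everywhere */
}

int main(void)
{
	uint32_t a = 7;
	uint64_t b = 9;

	bind(0);
	printf("%llu\n", (unsigned long long)get_val(&a));
	bind(1);
	printf("%llu\n", (unsigned long long)get_val(&b));
	return 0;
}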
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
|
@ -1,500 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* sorttable.h
|
||||
*
|
||||
* Added ORC unwind tables sort support and other updates:
|
||||
* Copyright (C) 1999-2019 Alibaba Group Holding Limited. by:
|
||||
* Shile Zhang <shile.zhang@linux.alibaba.com>
|
||||
*
|
||||
* Copyright 2011 - 2012 Cavium, Inc.
|
||||
*
|
||||
* Some of code was taken out of arch/x86/kernel/unwind_orc.c, written by:
|
||||
* Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
|
||||
*
|
||||
* Some of this code was taken out of recordmcount.h written by:
|
||||
*
|
||||
* Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
|
||||
* Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
|
||||
*/
|
||||
|
||||
#undef extable_ent_size
|
||||
#undef compare_extable
|
||||
#undef get_mcount_loc
|
||||
#undef sort_mcount_loc
|
||||
#undef elf_mcount_loc
|
||||
#undef do_sort
|
||||
#undef Elf_Addr
|
||||
#undef Elf_Ehdr
|
||||
#undef Elf_Shdr
|
||||
#undef Elf_Rel
|
||||
#undef Elf_Rela
|
||||
#undef Elf_Sym
|
||||
#undef ELF_R_SYM
|
||||
#undef Elf_r_sym
|
||||
#undef ELF_R_INFO
|
||||
#undef Elf_r_info
|
||||
#undef ELF_ST_BIND
|
||||
#undef ELF_ST_TYPE
|
||||
#undef fn_ELF_R_SYM
|
||||
#undef fn_ELF_R_INFO
|
||||
#undef uint_t
|
||||
#undef _r
|
||||
#undef _w
|
||||
|
||||
#ifdef SORTTABLE_64
|
||||
# define extable_ent_size 16
|
||||
# define compare_extable compare_extable_64
|
||||
# define get_mcount_loc get_mcount_loc_64
|
||||
# define sort_mcount_loc sort_mcount_loc_64
|
||||
# define elf_mcount_loc elf_mcount_loc_64
|
||||
# define do_sort do_sort_64
|
||||
# define Elf_Addr Elf64_Addr
|
||||
# define Elf_Ehdr Elf64_Ehdr
|
||||
# define Elf_Shdr Elf64_Shdr
|
||||
# define Elf_Rel Elf64_Rel
|
||||
# define Elf_Rela Elf64_Rela
|
||||
# define Elf_Sym Elf64_Sym
|
||||
# define ELF_R_SYM ELF64_R_SYM
|
||||
# define Elf_r_sym Elf64_r_sym
|
||||
# define ELF_R_INFO ELF64_R_INFO
|
||||
# define Elf_r_info Elf64_r_info
|
||||
# define ELF_ST_BIND ELF64_ST_BIND
|
||||
# define ELF_ST_TYPE ELF64_ST_TYPE
|
||||
# define fn_ELF_R_SYM fn_ELF64_R_SYM
|
||||
# define fn_ELF_R_INFO fn_ELF64_R_INFO
|
||||
# define uint_t uint64_t
|
||||
# define _r r8
|
||||
# define _w w8
|
||||
#else
|
||||
# define extable_ent_size 8
|
||||
# define compare_extable compare_extable_32
|
||||
# define get_mcount_loc get_mcount_loc_32
|
||||
# define sort_mcount_loc sort_mcount_loc_32
|
||||
# define elf_mcount_loc elf_mcount_loc_32
|
||||
# define do_sort do_sort_32
|
||||
# define Elf_Addr Elf32_Addr
|
||||
# define Elf_Ehdr Elf32_Ehdr
|
||||
# define Elf_Shdr Elf32_Shdr
|
||||
# define Elf_Rel Elf32_Rel
|
||||
# define Elf_Rela Elf32_Rela
|
||||
# define Elf_Sym Elf32_Sym
|
||||
# define ELF_R_SYM ELF32_R_SYM
|
||||
# define Elf_r_sym Elf32_r_sym
|
||||
# define ELF_R_INFO ELF32_R_INFO
|
||||
# define Elf_r_info Elf32_r_info
|
||||
# define ELF_ST_BIND ELF32_ST_BIND
|
||||
# define ELF_ST_TYPE ELF32_ST_TYPE
|
||||
# define fn_ELF_R_SYM fn_ELF32_R_SYM
|
||||
# define fn_ELF_R_INFO fn_ELF32_R_INFO
|
||||
# define uint_t uint32_t
|
||||
# define _r r
|
||||
# define _w w
|
||||
#endif
|
||||
|
||||
#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
|
||||
/* ORC unwinder only support X86_64 */
|
||||
#include <asm/orc_types.h>
|
||||
|
||||
#define ERRSTR_MAXSZ 256
|
||||
|
||||
char g_err[ERRSTR_MAXSZ];
|
||||
int *g_orc_ip_table;
|
||||
struct orc_entry *g_orc_table;
|
||||
|
||||
pthread_t orc_sort_thread;
|
||||
|
||||
static inline unsigned long orc_ip(const int *ip)
|
||||
{
|
||||
return (unsigned long)ip + *ip;
|
||||
}
|
||||
|
||||
static int orc_sort_cmp(const void *_a, const void *_b)
|
||||
{
|
||||
struct orc_entry *orc_a, *orc_b;
|
||||
const int *a = g_orc_ip_table + *(int *)_a;
|
||||
const int *b = g_orc_ip_table + *(int *)_b;
|
||||
unsigned long a_val = orc_ip(a);
|
||||
unsigned long b_val = orc_ip(b);
|
||||
|
||||
if (a_val > b_val)
|
||||
return 1;
|
||||
if (a_val < b_val)
|
||||
return -1;
|
||||
|
||||
/*
|
||||
* The "weak" section terminator entries need to always be on the left
|
||||
* to ensure the lookup code skips them in favor of real entries.
|
||||
* These terminator entries exist to handle any gaps created by
|
||||
* whitelisted .o files which didn't get objtool generation.
|
||||
*/
|
||||
orc_a = g_orc_table + (a - g_orc_ip_table);
|
||||
orc_b = g_orc_table + (b - g_orc_ip_table);
|
||||
if (orc_a->type == ORC_TYPE_UNDEFINED && orc_b->type == ORC_TYPE_UNDEFINED)
|
||||
return 0;
|
||||
return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
|
||||
}
|
||||
|
||||
static void *sort_orctable(void *arg)
|
||||
{
|
||||
int i;
|
||||
int *idxs = NULL;
|
||||
int *tmp_orc_ip_table = NULL;
|
||||
struct orc_entry *tmp_orc_table = NULL;
|
||||
unsigned int *orc_ip_size = (unsigned int *)arg;
|
||||
unsigned int num_entries = *orc_ip_size / sizeof(int);
|
||||
unsigned int orc_size = num_entries * sizeof(struct orc_entry);
|
||||
|
||||
idxs = (int *)malloc(*orc_ip_size);
|
||||
if (!idxs) {
|
||||
snprintf(g_err, ERRSTR_MAXSZ, "malloc idxs: %s",
|
||||
strerror(errno));
|
||||
pthread_exit(g_err);
|
||||
}
|
||||
|
||||
tmp_orc_ip_table = (int *)malloc(*orc_ip_size);
|
||||
if (!tmp_orc_ip_table) {
|
||||
snprintf(g_err, ERRSTR_MAXSZ, "malloc tmp_orc_ip_table: %s",
|
||||
strerror(errno));
|
||||
pthread_exit(g_err);
|
||||
}
|
||||
|
||||
tmp_orc_table = (struct orc_entry *)malloc(orc_size);
|
||||
if (!tmp_orc_table) {
|
||||
snprintf(g_err, ERRSTR_MAXSZ, "malloc tmp_orc_table: %s",
|
||||
strerror(errno));
|
||||
pthread_exit(g_err);
|
||||
}
|
||||
|
||||
/* initialize indices array, convert ip_table to absolute address */
|
||||
for (i = 0; i < num_entries; i++) {
|
||||
idxs[i] = i;
|
||||
tmp_orc_ip_table[i] = g_orc_ip_table[i] + i * sizeof(int);
|
||||
}
|
||||
memcpy(tmp_orc_table, g_orc_table, orc_size);
|
||||
|
||||
qsort(idxs, num_entries, sizeof(int), orc_sort_cmp);
|
||||
|
||||
for (i = 0; i < num_entries; i++) {
|
||||
if (idxs[i] == i)
|
||||
continue;
|
||||
|
||||
/* convert back to relative address */
|
||||
g_orc_ip_table[i] = tmp_orc_ip_table[idxs[i]] - i * sizeof(int);
|
||||
g_orc_table[i] = tmp_orc_table[idxs[i]];
|
||||
}
|
||||
|
||||
free(idxs);
|
||||
free(tmp_orc_ip_table);
|
||||
free(tmp_orc_table);
|
||||
pthread_exit(NULL);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int compare_extable(const void *a, const void *b)
|
||||
{
|
||||
Elf_Addr av = _r(a);
|
||||
Elf_Addr bv = _r(b);
|
||||
|
||||
if (av < bv)
|
||||
return -1;
|
||||
if (av > bv)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
#ifdef MCOUNT_SORT_ENABLED
|
||||
pthread_t mcount_sort_thread;
|
||||
|
||||
struct elf_mcount_loc {
|
||||
Elf_Ehdr *ehdr;
|
||||
Elf_Shdr *init_data_sec;
|
||||
uint_t start_mcount_loc;
|
||||
uint_t stop_mcount_loc;
|
||||
};
|
||||
|
||||
/* Sort the addresses stored between __start_mcount_loc to __stop_mcount_loc in vmlinux */
|
||||
static void *sort_mcount_loc(void *arg)
|
||||
{
|
||||
struct elf_mcount_loc *emloc = (struct elf_mcount_loc *)arg;
|
||||
uint_t offset = emloc->start_mcount_loc - _r(&(emloc->init_data_sec)->sh_addr)
|
||||
+ _r(&(emloc->init_data_sec)->sh_offset);
|
||||
uint_t count = emloc->stop_mcount_loc - emloc->start_mcount_loc;
|
||||
unsigned char *start_loc = (void *)emloc->ehdr + offset;
|
||||
|
||||
qsort(start_loc, count/sizeof(uint_t), sizeof(uint_t), compare_extable);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Get the address of __start_mcount_loc and __stop_mcount_loc in System.map */
|
||||
static void get_mcount_loc(uint_t *_start, uint_t *_stop)
|
||||
{
|
||||
FILE *file_start, *file_stop;
|
||||
char start_buff[20];
|
||||
char stop_buff[20];
|
||||
int len = 0;
|
||||
|
||||
file_start = popen(" grep start_mcount System.map | awk '{print $1}' ", "r");
|
||||
if (!file_start) {
|
||||
fprintf(stderr, "get start_mcount_loc error!");
|
||||
return;
|
||||
}
|
||||
|
||||
file_stop = popen(" grep stop_mcount System.map | awk '{print $1}' ", "r");
|
||||
if (!file_stop) {
|
||||
fprintf(stderr, "get stop_mcount_loc error!");
|
||||
pclose(file_start);
|
||||
return;
|
||||
}
|
||||
|
||||
while (fgets(start_buff, sizeof(start_buff), file_start) != NULL) {
|
||||
len = strlen(start_buff);
|
||||
start_buff[len - 1] = '\0';
|
||||
}
|
||||
*_start = strtoul(start_buff, NULL, 16);
|
||||
|
||||
while (fgets(stop_buff, sizeof(stop_buff), file_stop) != NULL) {
|
||||
len = strlen(stop_buff);
|
||||
stop_buff[len - 1] = '\0';
|
||||
}
|
||||
*_stop = strtoul(stop_buff, NULL, 16);
|
||||
|
||||
pclose(file_start);
|
||||
pclose(file_stop);
|
||||
}
|
||||
#endif
|
||||
static int do_sort(Elf_Ehdr *ehdr,
|
||||
char const *const fname,
|
||||
table_sort_t custom_sort)
|
||||
{
|
||||
int rc = -1;
|
||||
Elf_Shdr *s, *shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
|
||||
Elf_Shdr *strtab_sec = NULL;
|
||||
Elf_Shdr *symtab_sec = NULL;
|
||||
Elf_Shdr *extab_sec = NULL;
|
||||
Elf_Sym *sym;
|
||||
const Elf_Sym *symtab;
|
||||
Elf32_Word *symtab_shndx = NULL;
|
||||
Elf_Sym *sort_needed_sym = NULL;
|
||||
Elf_Shdr *sort_needed_sec;
|
||||
Elf_Rel *relocs = NULL;
|
||||
int relocs_size = 0;
|
||||
uint32_t *sort_needed_loc;
|
||||
const char *secstrings;
|
||||
const char *strtab;
|
||||
char *extab_image;
|
||||
int extab_index = 0;
|
||||
int i;
|
||||
int idx;
|
||||
unsigned int shnum;
|
||||
unsigned int shstrndx;
|
||||
#ifdef MCOUNT_SORT_ENABLED
|
||||
struct elf_mcount_loc mstruct = {0};
|
||||
uint_t _start_mcount_loc = 0;
|
||||
uint_t _stop_mcount_loc = 0;
|
||||
#endif
|
||||
#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
|
||||
unsigned int orc_ip_size = 0;
|
||||
unsigned int orc_size = 0;
|
||||
unsigned int orc_num_entries = 0;
|
||||
#endif
|
||||
|
||||
shstrndx = r2(&ehdr->e_shstrndx);
|
||||
if (shstrndx == SHN_XINDEX)
|
||||
shstrndx = r(&shdr[0].sh_link);
|
||||
secstrings = (const char *)ehdr + _r(&shdr[shstrndx].sh_offset);
|
||||
|
||||
shnum = r2(&ehdr->e_shnum);
|
||||
if (shnum == SHN_UNDEF)
|
||||
shnum = _r(&shdr[0].sh_size);
|
||||
|
||||
for (i = 0, s = shdr; s < shdr + shnum; i++, s++) {
|
||||
idx = r(&s->sh_name);
|
||||
if (!strcmp(secstrings + idx, "__ex_table")) {
|
||||
extab_sec = s;
|
||||
extab_index = i;
|
||||
}
|
||||
if (!strcmp(secstrings + idx, ".symtab"))
|
||||
symtab_sec = s;
|
||||
if (!strcmp(secstrings + idx, ".strtab"))
|
||||
strtab_sec = s;
|
||||
|
||||
if ((r(&s->sh_type) == SHT_REL ||
|
||||
r(&s->sh_type) == SHT_RELA) &&
|
||||
r(&s->sh_info) == extab_index) {
|
||||
relocs = (void *)ehdr + _r(&s->sh_offset);
|
||||
relocs_size = _r(&s->sh_size);
|
||||
}
|
||||
if (r(&s->sh_type) == SHT_SYMTAB_SHNDX)
|
||||
symtab_shndx = (Elf32_Word *)((const char *)ehdr +
|
||||
_r(&s->sh_offset));
|
||||
|
||||
#ifdef MCOUNT_SORT_ENABLED
|
||||
/* locate the .init.data section in vmlinux */
|
||||
if (!strcmp(secstrings + idx, ".init.data")) {
|
||||
get_mcount_loc(&_start_mcount_loc, &_stop_mcount_loc);
|
||||
mstruct.ehdr = ehdr;
|
||||
mstruct.init_data_sec = s;
|
||||
mstruct.start_mcount_loc = _start_mcount_loc;
|
||||
mstruct.stop_mcount_loc = _stop_mcount_loc;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
|
||||
/* locate the ORC unwind tables */
|
||||
if (!strcmp(secstrings + idx, ".orc_unwind_ip")) {
|
||||
orc_ip_size = s->sh_size;
|
||||
g_orc_ip_table = (int *)((void *)ehdr +
|
||||
s->sh_offset);
|
||||
}
|
||||
if (!strcmp(secstrings + idx, ".orc_unwind")) {
|
||||
orc_size = s->sh_size;
|
||||
g_orc_table = (struct orc_entry *)((void *)ehdr +
|
||||
s->sh_offset);
|
||||
}
|
||||
#endif
|
||||
} /* for loop */
|
||||
|
||||
#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
|
||||
if (!g_orc_ip_table || !g_orc_table) {
|
||||
fprintf(stderr,
|
||||
"incomplete ORC unwind tables in file: %s\n", fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
orc_num_entries = orc_ip_size / sizeof(int);
|
||||
if (orc_ip_size % sizeof(int) != 0 ||
|
||||
orc_size % sizeof(struct orc_entry) != 0 ||
|
||||
orc_num_entries != orc_size / sizeof(struct orc_entry)) {
|
||||
fprintf(stderr,
|
||||
"inconsistent ORC unwind table entries in file: %s\n",
|
||||
fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* create thread to sort ORC unwind tables concurrently */
|
||||
if (pthread_create(&orc_sort_thread, NULL,
|
||||
sort_orctable, &orc_ip_size)) {
|
||||
fprintf(stderr,
|
||||
"pthread_create orc_sort_thread failed '%s': %s\n",
|
||||
strerror(errno), fname);
|
||||
goto out;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef MCOUNT_SORT_ENABLED
|
||||
if (!mstruct.init_data_sec || !_start_mcount_loc || !_stop_mcount_loc) {
|
||||
fprintf(stderr,
|
||||
"incomplete mcount's sort in file: %s\n",
|
||||
fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* create thread to sort mcount_loc concurrently */
|
||||
if (pthread_create(&mcount_sort_thread, NULL, &sort_mcount_loc, &mstruct)) {
|
||||
fprintf(stderr,
|
||||
"pthread_create mcount_sort_thread failed '%s': %s\n",
|
||||
strerror(errno), fname);
|
||||
goto out;
|
||||
}
|
||||
#endif
|
||||
if (!extab_sec) {
|
||||
fprintf(stderr, "no __ex_table in file: %s\n", fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!symtab_sec) {
|
||||
fprintf(stderr, "no .symtab in file: %s\n", fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!strtab_sec) {
|
||||
fprintf(stderr, "no .strtab in file: %s\n", fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
extab_image = (void *)ehdr + _r(&extab_sec->sh_offset);
|
||||
strtab = (const char *)ehdr + _r(&strtab_sec->sh_offset);
|
||||
symtab = (const Elf_Sym *)((const char *)ehdr +
|
||||
_r(&symtab_sec->sh_offset));
|
||||
|
||||
if (custom_sort) {
|
||||
custom_sort(extab_image, _r(&extab_sec->sh_size));
|
||||
} else {
|
||||
int num_entries = _r(&extab_sec->sh_size) / extable_ent_size;
|
||||
qsort(extab_image, num_entries,
|
||||
extable_ent_size, compare_extable);
|
||||
}
|
||||
|
||||
/* If there were relocations, we no longer need them. */
|
||||
if (relocs)
|
||||
memset(relocs, 0, relocs_size);
|
||||
|
||||
/* find the flag main_extable_sort_needed */
|
||||
for (sym = (void *)ehdr + _r(&symtab_sec->sh_offset);
|
||||
sym < sym + _r(&symtab_sec->sh_size) / sizeof(Elf_Sym);
|
||||
sym++) {
|
||||
if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT)
|
||||
continue;
|
||||
if (!strcmp(strtab + r(&sym->st_name),
|
||||
"main_extable_sort_needed")) {
|
||||
sort_needed_sym = sym;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!sort_needed_sym) {
|
||||
fprintf(stderr,
|
||||
"no main_extable_sort_needed symbol in file: %s\n",
|
||||
fname);
|
||||
goto out;
|
||||
}
|
||||
|
||||
sort_needed_sec = &shdr[get_secindex(r2(&sym->st_shndx),
|
||||
sort_needed_sym - symtab,
|
||||
symtab_shndx)];
|
||||
sort_needed_loc = (void *)ehdr +
|
||||
_r(&sort_needed_sec->sh_offset) +
|
||||
_r(&sort_needed_sym->st_value) -
|
||||
_r(&sort_needed_sec->sh_addr);
|
||||
|
||||
/* extable has been sorted, clear the flag */
|
||||
w(0, sort_needed_loc);
|
||||
rc = 0;
|
||||
|
||||
out:
|
||||
#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
|
||||
if (orc_sort_thread) {
|
||||
void *retval = NULL;
|
||||
/* wait for ORC tables sort done */
|
||||
rc = pthread_join(orc_sort_thread, &retval);
|
||||
if (rc) {
|
||||
fprintf(stderr,
|
||||
"pthread_join failed '%s': %s\n",
|
||||
strerror(errno), fname);
|
||||
} else if (retval) {
|
||||
rc = -1;
|
||||
fprintf(stderr,
|
||||
"failed to sort ORC tables '%s': %s\n",
|
||||
(char *)retval, fname);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef MCOUNT_SORT_ENABLED
|
||||
if (mcount_sort_thread) {
|
||||
void *retval = NULL;
|
||||
/* wait for mcount sort done */
|
||||
rc = pthread_join(mcount_sort_thread, &retval);
|
||||
if (rc) {
|
||||
fprintf(stderr,
|
||||
"pthread_join failed '%s': %s\n",
|
||||
strerror(errno), fname);
|
||||
} else if (retval) {
|
||||
rc = -1;
|
||||
fprintf(stderr,
|
||||
"failed to sort mcount '%s': %s\n",
|
||||
(char *)retval, fname);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
return rc;
|
||||
}
|
@ -6,4 +6,6 @@ TEST_PROGS := ftracetest-ktap
|
||||
TEST_FILES := test.d settings
|
||||
EXTRA_CLEAN := $(OUTPUT)/logs/*
|
||||
|
||||
TEST_GEN_PROGS = poll
|
||||
|
||||
include ../lib.mk
|
||||
|
74
tools/testing/selftests/ftrace/poll.c
Normal file
@ -0,0 +1,74 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Simple poll on a file.
|
||||
*
|
||||
* Copyright (c) 2024 Google LLC.
|
||||
*/
|
||||
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <poll.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#define BUFSIZE 4096
|
||||
|
||||
/*
|
||||
* Usage:
|
||||
* poll [-I|-P] [-t timeout] FILE
|
||||
*/
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
struct pollfd pfd = {.events = POLLIN};
|
||||
char buf[BUFSIZE];
|
||||
int timeout = -1;
|
||||
int ret, opt;
|
||||
|
||||
while ((opt = getopt(argc, argv, "IPt:")) != -1) {
|
||||
switch (opt) {
|
||||
case 'I':
|
||||
pfd.events = POLLIN;
|
||||
break;
|
||||
case 'P':
|
||||
pfd.events = POLLPRI;
|
||||
break;
|
||||
case 't':
|
||||
timeout = atoi(optarg);
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "Usage: %s [-I|-P] [-t timeout] FILE\n",
|
||||
argv[0]);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
if (optind >= argc) {
|
||||
fprintf(stderr, "Error: Polling file is not specified\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
pfd.fd = open(argv[optind], O_RDONLY);
|
||||
if (pfd.fd < 0) {
|
||||
fprintf(stderr, "failed to open %s", argv[optind]);
|
||||
perror("open");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Reset poll by read if POLLIN is specified. */
|
||||
if (pfd.events & POLLIN)
|
||||
do {} while (read(pfd.fd, buf, BUFSIZE) == BUFSIZE);
|
||||
|
||||
ret = poll(&pfd, 1, timeout);
|
||||
if (ret < 0 && errno != EINTR) {
|
||||
perror("poll");
|
||||
return -1;
|
||||
}
|
||||
close(pfd.fd);
|
||||
|
||||
/* If a timeout happened (ret == 0), the exit code is 1 */
|
||||
if (ret == 0)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
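The POLLIN path first drains the file with the do {} while (read(...) == BUFSIZE) loop, so the subsequent poll() only wakes on new input. The same drain-until-short-read idiom in isolation, reading stdin as an illustration:

/* Drain-until-short-read in isolation: afterwards, poll(POLLIN) on the
 * fd would block until fresh data arrives. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;

	do {
		n = read(STDIN_FILENO, buf, sizeof(buf));
	} while (n == (ssize_t)sizeof(buf));	/* short read or EOF stops us */

	printf("drained\n");
	return 0;
}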
|
@ -0,0 +1,74 @@
|
||||
#!/bin/sh
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
# description: event trigger - test poll wait on histogram
|
||||
# requires: set_event events/sched/sched_process_free/trigger events/sched/sched_process_free/hist
|
||||
# flags: instance
|
||||
|
||||
POLL=${FTRACETEST_ROOT}/poll
|
||||
|
||||
if [ ! -x ${POLL} ]; then
|
||||
echo "poll program is not compiled!"
|
||||
exit_unresolved
|
||||
fi
|
||||
|
||||
EVENT=events/sched/sched_process_free/
|
||||
|
||||
# Check whether poll ops are supported. Before poll was implemented on the hist
# file, polling returned immediately with POLLIN | POLLOUT, but not POLLPRI.
|
||||
|
||||
# This must wait >1 sec and return 1 (timeout).
|
||||
set +e
|
||||
${POLL} -I -t 1000 ${EVENT}/hist
|
||||
ret=$?
|
||||
set -e
|
||||
if [ ${ret} != 1 ]; then
|
||||
echo "poll on hist file is not supported"
|
||||
exit_unsupported
|
||||
fi
|
||||
|
||||
# Test POLLIN
|
||||
echo > trace
|
||||
echo 'hist:key=comm if comm =="sleep"' > ${EVENT}/trigger
|
||||
echo 1 > ${EVENT}/enable
|
||||
|
||||
# This sleep command will exit after 2 seconds.
|
||||
sleep 2 &
|
||||
BGPID=$!
|
||||
# if timeout happens, poll returns 1.
|
||||
${POLL} -I -t 4000 ${EVENT}/hist
|
||||
echo 0 > tracing_on
|
||||
|
||||
if [ -d /proc/${BGPID} ]; then
|
||||
echo "poll exits too soon"
|
||||
kill -KILL ${BGPID} ||:
|
||||
exit_fail
|
||||
fi
|
||||
|
||||
if ! grep -qw "sleep" trace; then
|
||||
echo "poll exits before event happens"
|
||||
exit_fail
|
||||
fi
|
||||
|
||||
# Test POLLPRI
|
||||
echo > trace
|
||||
echo 1 > tracing_on
|
||||
|
||||
# This sleep command will exit after 2 seconds.
|
||||
sleep 2 &
|
||||
BGPID=$!
|
||||
# if timeout happens, poll returns 1.
|
||||
${POLL} -P -t 4000 ${EVENT}/hist
|
||||
echo 0 > tracing_on
|
||||
|
||||
if [ -d /proc/${BGPID} ]; then
|
||||
echo "poll exits too soon"
|
||||
kill -KILL ${BGPID} ||:
|
||||
exit_fail
|
||||
fi
|
||||
|
||||
if ! grep -qw "sleep" trace; then
|
||||
echo "poll exits before event happens"
|
||||
exit_fail
|
||||
fi
|
||||
|
||||
exit_pass
|
@ -19,13 +19,14 @@ class Automata:
|
||||
|
||||
invalid_state_str = "INVALID_STATE"
|
||||
|
||||
def __init__(self, file_path):
|
||||
def __init__(self, file_path, model_name=None):
|
||||
self.__dot_path = file_path
|
||||
self.name = self.__get_model_name()
|
||||
self.name = model_name or self.__get_model_name()
|
||||
self.__dot_lines = self.__open_dot()
|
||||
self.states, self.initial_state, self.final_states = self.__get_state_variables()
|
||||
self.events = self.__get_event_variables()
|
||||
self.function = self.__create_matrix()
|
||||
self.events_start, self.events_start_run = self.__store_init_events()
|
||||
|
||||
def __get_model_name(self):
|
||||
basename = ntpath.basename(self.__dot_path)
|
||||
@ -172,3 +173,34 @@ class Automata:
|
||||
cursor += 1
|
||||
|
||||
return matrix
|
||||
|
||||
def __store_init_events(self):
|
||||
events_start = [False] * len(self.events)
|
||||
events_start_run = [False] * len(self.events)
|
||||
for i, _ in enumerate(self.events):
|
||||
curr_event_will_init = 0
|
||||
curr_event_from_init = False
|
||||
curr_event_used = 0
|
||||
for j, _ in enumerate(self.states):
|
||||
if self.function[j][i] != self.invalid_state_str:
|
||||
curr_event_used += 1
|
||||
if self.function[j][i] == self.initial_state:
|
||||
curr_event_will_init += 1
|
||||
if self.function[0][i] != self.invalid_state_str:
|
||||
curr_event_from_init = True
|
||||
# this event always leads to init
|
||||
if curr_event_will_init and curr_event_used == curr_event_will_init:
|
||||
events_start[i] = True
|
||||
# this event is only called from init
|
||||
if curr_event_from_init and curr_event_used == 1:
|
||||
events_start_run[i] = True
|
||||
return events_start, events_start_run
|
||||
|
||||
def is_start_event(self, event):
|
||||
return self.events_start[self.events.index(event)]
|
||||
|
||||
def is_start_run_event(self, event):
|
||||
# prefer handle_start_event if present
|
||||
if any(self.events_start):
|
||||
return False
|
||||
return self.events_start_run[self.events.index(event)]
|
||||
|
@ -22,8 +22,8 @@ class Dot2c(Automata):
|
||||
struct_automaton_def = "automaton"
|
||||
var_automaton_def = "aut"
|
||||
|
||||
def __init__(self, file_path):
|
||||
super().__init__(file_path)
|
||||
def __init__(self, file_path, model_name=None):
|
||||
super().__init__(file_path, model_name)
|
||||
self.line_length = 100
|
||||
|
||||
def __buff_to_string(self, buff):
|
||||
|
@ -21,25 +21,24 @@ if __name__ == '__main__':
|
||||
parser.add_argument('-t', "--monitor_type", dest="monitor_type", required=True)
|
||||
parser.add_argument('-n', "--model_name", dest="model_name", required=False)
|
||||
parser.add_argument("-D", "--description", dest="description", required=False)
|
||||
parser.add_argument("-a", "--auto_patch", dest="auto_patch",
|
||||
action="store_true", required=False,
|
||||
help="Patch the kernel in place")
|
||||
params = parser.parse_args()
|
||||
|
||||
print("Opening and parsing the dot file %s" % params.dot_file)
|
||||
try:
|
||||
monitor=dot2k(params.dot_file, params.monitor_type)
|
||||
monitor=dot2k(params.dot_file, params.monitor_type, vars(params))
|
||||
except Exception as e:
|
||||
print('Error: '+ str(e))
|
||||
print("Sorry : :-(")
|
||||
sys.exit(1)
|
||||
|
||||
# easier than using argparse action.
|
||||
if params.model_name != None:
|
||||
print(params.model_name)
|
||||
|
||||
print("Writing the monitor into the directory %s" % monitor.name)
|
||||
monitor.print_files()
|
||||
print("Almost done, checklist")
|
||||
print(" - Edit the %s/%s.c to add the instrumentation" % (monitor.name, monitor.name))
|
||||
print(" - Edit include/trace/events/rv.h to add the tracepoint entry")
|
||||
print(" - Move it to the kernel's monitor directory")
|
||||
print(" - Edit kernel/trace/rv/Makefile")
|
||||
print(" - Edit kernel/trace/rv/Kconfig")
|
||||
print(monitor.fill_tracepoint_tooltip())
|
||||
print(monitor.fill_makefile_tooltip())
|
||||
print(monitor.fill_kconfig_tooltip())
|
||||
print(monitor.fill_monitor_tooltip())
|
||||
|
@ -14,50 +14,83 @@ import os
|
||||
|
||||
class dot2k(Dot2c):
|
||||
monitor_types = { "global" : 1, "per_cpu" : 2, "per_task" : 3 }
|
||||
monitor_templates_dir = "dot2k/rv_templates/"
|
||||
monitor_templates_dir = "dot2/dot2k_templates/"
|
||||
rv_dir = "kernel/trace/rv"
|
||||
monitor_type = "per_cpu"
|
||||
|
||||
def __init__(self, file_path, MonitorType):
|
||||
super().__init__(file_path)
|
||||
def __init__(self, file_path, MonitorType, extra_params={}):
|
||||
super().__init__(file_path, extra_params.get("model_name"))
|
||||
|
||||
self.monitor_type = self.monitor_types.get(MonitorType)
|
||||
if self.monitor_type == None:
|
||||
raise Exception("Unknown monitor type: %s" % MonitorType)
|
||||
if self.monitor_type is None:
|
||||
raise ValueError("Unknown monitor type: %s" % MonitorType)
|
||||
|
||||
self.monitor_type = MonitorType
|
||||
self.__fill_rv_templates_dir()
|
||||
self.main_c = self.__open_file(self.monitor_templates_dir + "main_" + MonitorType + ".c")
|
||||
self.main_c = self.__read_file(self.monitor_templates_dir + "main.c")
|
||||
self.trace_h = self.__read_file(self.monitor_templates_dir + "trace.h")
|
||||
self.kconfig = self.__read_file(self.monitor_templates_dir + "Kconfig")
|
||||
self.enum_suffix = "_%s" % self.name
|
||||
self.description = extra_params.get("description", self.name) or "auto-generated"
|
||||
self.auto_patch = extra_params.get("auto_patch")
|
||||
if self.auto_patch:
|
||||
self.__fill_rv_kernel_dir()
|
||||
|
||||
def __fill_rv_templates_dir(self):
|
||||
|
||||
if os.path.exists(self.monitor_templates_dir) == True:
|
||||
if os.path.exists(self.monitor_templates_dir):
|
||||
return
|
||||
|
||||
if platform.system() != "Linux":
|
||||
raise Exception("I can only run on Linux.")
|
||||
raise OSError("I can only run on Linux.")
|
||||
|
||||
kernel_path = "/lib/modules/%s/build/tools/verification/dot2/dot2k_templates/" % (platform.release())
|
||||
|
||||
if os.path.exists(kernel_path) == True:
|
||||
if os.path.exists(kernel_path):
|
||||
self.monitor_templates_dir = kernel_path
|
||||
return
|
||||
|
||||
if os.path.exists("/usr/share/dot2/dot2k_templates/") == True:
|
||||
if os.path.exists("/usr/share/dot2/dot2k_templates/"):
|
||||
self.monitor_templates_dir = "/usr/share/dot2/dot2k_templates/"
|
||||
return
|
||||
|
||||
raise Exception("Could not find the template directory, do you have the kernel source installed?")
|
||||
raise FileNotFoundError("Could not find the template directory, do you have the kernel source installed?")
|
||||
|
||||
def __fill_rv_kernel_dir(self):
|
||||
|
||||
def __open_file(self, path):
|
||||
# first try if we are running in the kernel tree root
|
||||
if os.path.exists(self.rv_dir):
|
||||
return
|
||||
|
||||
# offset if we are running inside the kernel tree from verification/dot2
|
||||
kernel_path = os.path.join("../..", self.rv_dir)
|
||||
|
||||
if os.path.exists(kernel_path):
|
||||
self.rv_dir = kernel_path
|
||||
return
|
||||
|
||||
if platform.system() != "Linux":
|
||||
raise OSError("I can only run on Linux.")
|
||||
|
||||
kernel_path = os.path.join("/lib/modules/%s/build" % platform.release(), self.rv_dir)
|
||||
|
||||
# if the current kernel is from a distro this may not be a full kernel tree
|
||||
# verify that one of the files we are going to modify is available
|
||||
if os.path.exists(os.path.join(kernel_path, "rv_trace.h")):
|
||||
self.rv_dir = kernel_path
|
||||
return
|
||||
|
||||
raise FileNotFoundError("Could not find the rv directory, do you have the kernel source installed?")
|
||||
|
||||
def __read_file(self, path):
|
||||
try:
|
||||
fd = open(path)
|
||||
fd = open(path, 'r')
|
||||
except OSError:
|
||||
raise Exception("Cannot open the file: %s" % path)
|
||||
|
||||
content = fd.read()
|
||||
|
||||
fd.close()
|
||||
return content
|
||||
|
||||
def __buff_to_string(self, buff):
|
||||
@ -69,16 +102,26 @@ class dot2k(Dot2c):
|
||||
# cut off the last \n
|
||||
return string[:-1]
|
||||
|
||||
def fill_monitor_type(self):
|
||||
return self.monitor_type.upper()
|
||||
|
||||
def fill_tracepoint_handlers_skel(self):
|
||||
buff = []
|
||||
for event in self.events:
|
||||
buff.append("static void handle_%s(void *data, /* XXX: fill header */)" % event)
|
||||
buff.append("{")
|
||||
handle = "handle_event"
|
||||
if self.is_start_event(event):
|
||||
buff.append("\t/* XXX: validate that this event always leads to the initial state */")
|
||||
handle = "handle_start_event"
|
||||
elif self.is_start_run_event(event):
|
||||
buff.append("\t/* XXX: validate that this event is only valid in the initial state */")
|
||||
handle = "handle_start_run_event"
|
||||
if self.monitor_type == "per_task":
|
||||
buff.append("\tstruct task_struct *p = /* XXX: how do I get p? */;");
|
||||
buff.append("\tda_handle_event_%s(p, %s%s);" % (self.name, event, self.enum_suffix));
|
||||
buff.append("\tda_%s_%s(p, %s%s);" % (handle, self.name, event, self.enum_suffix));
|
||||
else:
|
||||
buff.append("\tda_handle_event_%s(%s%s);" % (self.name, event, self.enum_suffix));
|
||||
buff.append("\tda_%s_%s(%s%s);" % (handle, self.name, event, self.enum_suffix));
|
||||
buff.append("}")
|
||||
buff.append("")
|
||||
return self.__buff_to_string(buff)
|
||||
@ -97,18 +140,21 @@ class dot2k(Dot2c):
|
||||
|
||||
def fill_main_c(self):
|
||||
main_c = self.main_c
|
||||
monitor_type = self.fill_monitor_type()
|
||||
min_type = self.get_minimun_type()
|
||||
nr_events = self.events.__len__()
|
||||
nr_events = len(self.events)
|
||||
tracepoint_handlers = self.fill_tracepoint_handlers_skel()
|
||||
tracepoint_attach = self.fill_tracepoint_attach_probe()
|
||||
tracepoint_detach = self.fill_tracepoint_detach_helper()
|
||||
|
||||
main_c = main_c.replace("MIN_TYPE", min_type)
|
||||
main_c = main_c.replace("MODEL_NAME", self.name)
|
||||
main_c = main_c.replace("NR_EVENTS", str(nr_events))
|
||||
main_c = main_c.replace("TRACEPOINT_HANDLERS_SKEL", tracepoint_handlers)
|
||||
main_c = main_c.replace("TRACEPOINT_ATTACH", tracepoint_attach)
|
||||
main_c = main_c.replace("TRACEPOINT_DETACH", tracepoint_detach)
|
||||
main_c = main_c.replace("%%MONITOR_TYPE%%", monitor_type)
|
||||
main_c = main_c.replace("%%MIN_TYPE%%", min_type)
|
||||
main_c = main_c.replace("%%MODEL_NAME%%", self.name)
|
||||
main_c = main_c.replace("%%NR_EVENTS%%", str(nr_events))
|
||||
main_c = main_c.replace("%%TRACEPOINT_HANDLERS_SKEL%%", tracepoint_handlers)
|
||||
main_c = main_c.replace("%%TRACEPOINT_ATTACH%%", tracepoint_attach)
|
||||
main_c = main_c.replace("%%TRACEPOINT_DETACH%%", tracepoint_detach)
|
||||
main_c = main_c.replace("%%DESCRIPTION%%", self.description)
|
||||
|
||||
return main_c
|
||||
|
||||
@ -137,31 +183,142 @@ class dot2k(Dot2c):
|
||||
|
||||
return self.__buff_to_string(buff)
|
||||
|
||||
def fill_monitor_class_type(self):
|
||||
if self.monitor_type == "per_task":
|
||||
return "DA_MON_EVENTS_ID"
|
||||
return "DA_MON_EVENTS_IMPLICIT"
|
||||
|
||||
def fill_monitor_class(self):
|
||||
if self.monitor_type == "per_task":
|
||||
return "da_monitor_id"
|
||||
return "da_monitor"
|
||||
|
||||
def fill_tracepoint_args_skel(self, tp_type):
|
||||
buff = []
|
||||
tp_args_event = [
|
||||
("char *", "state"),
|
||||
("char *", "event"),
|
||||
("char *", "next_state"),
|
||||
("bool ", "final_state"),
|
||||
]
|
||||
tp_args_error = [
|
||||
("char *", "state"),
|
||||
("char *", "event"),
|
||||
]
|
||||
tp_args_id = ("int ", "id")
|
||||
tp_args = tp_args_event if tp_type == "event" else tp_args_error
|
||||
if self.monitor_type == "per_task":
|
||||
tp_args.insert(0, tp_args_id)
|
||||
tp_proto_c = ", ".join([a+b for a,b in tp_args])
|
||||
tp_args_c = ", ".join([b for a,b in tp_args])
|
||||
buff.append(" TP_PROTO(%s)," % tp_proto_c)
|
||||
buff.append(" TP_ARGS(%s)" % tp_args_c)
|
||||
return self.__buff_to_string(buff)
|
||||
|
||||
def fill_trace_h(self):
|
||||
trace_h = self.trace_h
|
||||
monitor_class = self.fill_monitor_class()
|
||||
monitor_class_type = self.fill_monitor_class_type()
|
||||
tracepoint_args_skel_event = self.fill_tracepoint_args_skel("event")
|
||||
tracepoint_args_skel_error = self.fill_tracepoint_args_skel("error")
|
||||
trace_h = trace_h.replace("%%MODEL_NAME%%", self.name)
|
||||
trace_h = trace_h.replace("%%MODEL_NAME_UP%%", self.name.upper())
|
||||
trace_h = trace_h.replace("%%MONITOR_CLASS%%", monitor_class)
|
||||
trace_h = trace_h.replace("%%MONITOR_CLASS_TYPE%%", monitor_class_type)
|
||||
trace_h = trace_h.replace("%%TRACEPOINT_ARGS_SKEL_EVENT%%", tracepoint_args_skel_event)
|
||||
trace_h = trace_h.replace("%%TRACEPOINT_ARGS_SKEL_ERROR%%", tracepoint_args_skel_error)
|
||||
return trace_h
|
||||
|
||||
def fill_kconfig(self):
|
||||
kconfig = self.kconfig
|
||||
monitor_class_type = self.fill_monitor_class_type()
|
||||
kconfig = kconfig.replace("%%MODEL_NAME%%", self.name)
|
||||
kconfig = kconfig.replace("%%MODEL_NAME_UP%%", self.name.upper())
|
||||
kconfig = kconfig.replace("%%MONITOR_CLASS_TYPE%%", monitor_class_type)
|
||||
kconfig = kconfig.replace("%%DESCRIPTION%%", self.description)
|
||||
return kconfig
|
||||
|
||||
def __patch_file(self, file, marker, line):
|
||||
file_to_patch = os.path.join(self.rv_dir, file)
|
||||
content = self.__read_file(file_to_patch)
|
||||
content = content.replace(marker, line + "\n" + marker)
|
||||
self.__write_file(file_to_patch, content)
|
||||
|
||||
def fill_tracepoint_tooltip(self):
|
||||
monitor_class_type = self.fill_monitor_class_type()
|
||||
if self.auto_patch:
|
||||
self.__patch_file("rv_trace.h",
|
||||
"// Add new monitors based on CONFIG_%s here" % monitor_class_type,
|
||||
"#include <monitors/%s/%s_trace.h>" % (self.name, self.name))
|
||||
return " - Patching %s/rv_trace.h, double check the result" % self.rv_dir
|
||||
|
||||
return """ - Edit %s/rv_trace.h:
|
||||
Add this line where other tracepoints are included and %s is defined:
|
||||
#include <monitors/%s/%s_trace.h>
|
||||
""" % (self.rv_dir, monitor_class_type, self.name, self.name)
|
||||
|
||||
def fill_kconfig_tooltip(self):
|
||||
if self.auto_patch:
|
||||
self.__patch_file("Kconfig",
|
||||
"# Add new monitors here",
|
||||
"source \"kernel/trace/rv/monitors/%s/Kconfig\"" % (self.name))
|
||||
return " - Patching %s/Kconfig, double check the result" % self.rv_dir
|
||||
|
||||
return """ - Edit %s/Kconfig:
|
||||
Add this line where other monitors are included:
|
||||
source \"kernel/trace/rv/monitors/%s/Kconfig\"
|
||||
""" % (self.rv_dir, self.name)
|
||||
|
||||
def fill_makefile_tooltip(self):
|
||||
name = self.name
|
||||
name_up = name.upper()
|
||||
if self.auto_patch:
|
||||
self.__patch_file("Makefile",
|
||||
"# Add new monitors here",
|
||||
"obj-$(CONFIG_RV_MON_%s) += monitors/%s/%s.o" % (name_up, name, name))
|
||||
return " - Patching %s/Makefile, double check the result" % self.rv_dir
|
||||
|
||||
return """ - Edit %s/Makefile:
|
||||
Add this line where other monitors are included:
|
||||
obj-$(CONFIG_RV_MON_%s) += monitors/%s/%s.o
|
||||
""" % (self.rv_dir, name_up, name, name)
|
||||
|
||||
def fill_monitor_tooltip(self):
|
||||
if self.auto_patch:
|
||||
return " - Monitor created in %s/monitors/%s" % (self.rv_dir, self. name)
|
||||
return " - Move %s/ to the kernel's monitor directory (%s/monitors)" % (self.name, self.rv_dir)
|
||||
|
||||
    def __create_directory(self):
        path = self.name
        if self.auto_patch:
            path = os.path.join(self.rv_dir, "monitors", path)

        try:
            os.mkdir(path)
        except FileExistsError:
            return
        except:
            print("Fail creating the output dir: %s" % self.name)

    def __write_file(self, file_name, content):
        try:
            file = open(file_name, 'w')
        except:
            print("Fail writing to file: %s" % file_name)
            # Bail out: without the early return, a failed open() would fall
            # through to file.write() and raise a NameError.
            return

        file.write(content)
        file.close()

    def __create_file(self, file_name, content):
        path = "%s/%s" % (self.name, file_name)
        if self.auto_patch:
            path = os.path.join(self.rv_dir, "monitors", path)
        self.__write_file(path, content)

    def __get_main_name(self):
        path = "%s/%s" % (self.name, "main.c")
        if not os.path.exists(path):
            return "main.c"
        return "__main.c"

    def print_files(self):
@ -175,3 +332,10 @@ class dot2k(Dot2c):

        path = "%s.h" % self.name
        self.__create_file(path, model_h)

        trace_h = self.fill_trace_h()
        path = "%s_trace.h" % self.name
        self.__create_file(path, trace_h)

        kconfig = self.fill_kconfig()
        self.__create_file("Kconfig", kconfig)

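Taken together, these methods are meant to be driven by the dot2k command-line
wrapper. A rough sketch of how the class could be exercised (the constructor
signature and the "auto_patch" parameter name are assumptions for
illustration, not the exact CLI):

    # Hypothetical driver; the real entry point is the dot2k script.
    params = {"auto_patch": True}   # assumed knob enabling __patch_file()
    monitor = dot2k("wip.dot", "per_cpu", params)
    monitor.print_files()           # writes roughly main.c, wip.h, wip_trace.h, Kconfig
    print(monitor.fill_tracepoint_tooltip())
    print(monitor.fill_kconfig_tooltip())
    print(monitor.fill_makefile_tooltip())
    print(monitor.fill_monitor_tooltip())
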
6
tools/verification/dot2/dot2k_templates/Kconfig
Normal file
@ -0,0 +1,6 @@
config RV_MON_%%MODEL_NAME_UP%%
	depends on RV
	select %%MONITOR_CLASS_TYPE%%
	bool "%%MODEL_NAME%% monitor"
	help
	  %%DESCRIPTION%%
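For a hypothetical per-cpu monitor named "wip", fill_kconfig() would render
the template roughly as follows (the selected class type is an assumption
based on the monitor type):

config RV_MON_WIP
	depends on RV
	select DA_MON_EVENTS_IMPLICIT
	bool "wip monitor"
	help
	  auto-generated wip monitor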
91
tools/verification/dot2/dot2k_templates/main.c
Normal file
@ -0,0 +1,91 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/tracepoint.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rv.h>
#include <rv/instrumentation.h>
#include <rv/da_monitor.h>

#define MODULE_NAME "%%MODEL_NAME%%"

/*
 * XXX: include required tracepoint headers, e.g.,
 * #include <trace/events/sched.h>
 */
#include <rv_trace.h>

/*
 * This is the self-generated part of the monitor. Generally, there is no need
 * to touch this section.
 */
#include "%%MODEL_NAME%%.h"

/*
 * Declare the deterministic automata monitor.
 *
 * The rv monitor reference is needed for the monitor declaration.
 */
static struct rv_monitor rv_%%MODEL_NAME%%;
DECLARE_DA_MON_%%MONITOR_TYPE%%(%%MODEL_NAME%%, %%MIN_TYPE%%);

/*
 * This is the instrumentation part of the monitor.
 *
 * This is the section where manual work is required. Here the kernel events
 * are translated into the model's events.
 */
%%TRACEPOINT_HANDLERS_SKEL%%
static int enable_%%MODEL_NAME%%(void)
{
	int retval;

	retval = da_monitor_init_%%MODEL_NAME%%();
	if (retval)
		return retval;

%%TRACEPOINT_ATTACH%%

	return 0;
}

static void disable_%%MODEL_NAME%%(void)
{
	rv_%%MODEL_NAME%%.enabled = 0;

%%TRACEPOINT_DETACH%%

	da_monitor_destroy_%%MODEL_NAME%%();
}

/*
 * This is the monitor register section.
 */
static struct rv_monitor rv_%%MODEL_NAME%% = {
	.name = "%%MODEL_NAME%%",
	.description = "%%DESCRIPTION%%",
	.enable = enable_%%MODEL_NAME%%,
	.disable = disable_%%MODEL_NAME%%,
	.reset = da_monitor_reset_all_%%MODEL_NAME%%,
	.enabled = 0,
};

static int __init register_%%MODEL_NAME%%(void)
{
	rv_register_monitor(&rv_%%MODEL_NAME%%);
	return 0;
}

static void __exit unregister_%%MODEL_NAME%%(void)
{
	rv_unregister_monitor(&rv_%%MODEL_NAME%%);
}

module_init(register_%%MODEL_NAME%%);
module_exit(unregister_%%MODEL_NAME%%);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("dot2k: auto-generated");
MODULE_DESCRIPTION("%%MODEL_NAME%%: %%DESCRIPTION%%");
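For reference, the %%TRACEPOINT_HANDLERS_SKEL%%, %%TRACEPOINT_ATTACH%% and
%%TRACEPOINT_DETACH%% placeholders are expected to be filled with code along
these lines (a sketch modeled on the in-tree wip monitor; the tracepoint and
event names are illustrative):

static void handle_preempt_disable(void *data, unsigned long ip,
				   unsigned long parent_ip)
{
	/* Translate the kernel tracepoint into the model's event. */
	da_handle_event_wip(preempt_disable_wip);
}

/* In enable_wip()/disable_wip(), in place of the attach/detach markers: */
rv_attach_trace_probe("wip", preempt_disable, handle_preempt_disable);
rv_detach_trace_probe("wip", preempt_disable, handle_preempt_disable);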
@ -1,91 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/tracepoint.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rv.h>
#include <rv/instrumentation.h>
#include <rv/da_monitor.h>

#define MODULE_NAME "MODEL_NAME"

/*
 * XXX: include required tracepoint headers, e.g.,
 * #include <trace/events/sched.h>
 */
#include <trace/events/rv.h>

/*
 * This is the self-generated part of the monitor. Generally, there is no need
 * to touch this section.
 */
#include "MODEL_NAME.h"

/*
 * Declare the deterministic automata monitor.
 *
 * The rv monitor reference is needed for the monitor declaration.
 */
static struct rv_monitor rv_MODEL_NAME;
DECLARE_DA_MON_GLOBAL(MODEL_NAME, MIN_TYPE);

/*
 * This is the instrumentation part of the monitor.
 *
 * This is the section where manual work is required. Here the kernel events
 * are translated into model's event.
 *
 */
TRACEPOINT_HANDLERS_SKEL
static int enable_MODEL_NAME(void)
{
	int retval;

	retval = da_monitor_init_MODEL_NAME();
	if (retval)
		return retval;

TRACEPOINT_ATTACH

	return 0;
}

static void disable_MODEL_NAME(void)
{
	rv_MODEL_NAME.enabled = 0;

TRACEPOINT_DETACH

	da_monitor_destroy_MODEL_NAME();
}

/*
 * This is the monitor register section.
 */
static struct rv_monitor rv_MODEL_NAME = {
	.name = "MODEL_NAME",
	.description = "auto-generated MODEL_NAME",
	.enable = enable_MODEL_NAME,
	.disable = disable_MODEL_NAME,
	.reset = da_monitor_reset_all_MODEL_NAME,
	.enabled = 0,
};

static int __init register_MODEL_NAME(void)
{
	rv_register_monitor(&rv_MODEL_NAME);
	return 0;
}

static void __exit unregister_MODEL_NAME(void)
{
	rv_unregister_monitor(&rv_MODEL_NAME);
}

module_init(register_MODEL_NAME);
module_exit(unregister_MODEL_NAME);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("dot2k: auto-generated");
MODULE_DESCRIPTION("MODEL_NAME");
@ -1,91 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/tracepoint.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rv.h>
#include <rv/instrumentation.h>
#include <rv/da_monitor.h>

#define MODULE_NAME "MODEL_NAME"

/*
 * XXX: include required tracepoint headers, e.g.,
 * #include <linux/trace/events/sched.h>
 */
#include <trace/events/rv.h>

/*
 * This is the self-generated part of the monitor. Generally, there is no need
 * to touch this section.
 */
#include "MODEL_NAME.h"

/*
 * Declare the deterministic automata monitor.
 *
 * The rv monitor reference is needed for the monitor declaration.
 */
static struct rv_monitor rv_MODEL_NAME;
DECLARE_DA_MON_PER_CPU(MODEL_NAME, MIN_TYPE);

/*
 * This is the instrumentation part of the monitor.
 *
 * This is the section where manual work is required. Here the kernel events
 * are translated into model's event.
 *
 */
TRACEPOINT_HANDLERS_SKEL
static int enable_MODEL_NAME(void)
{
	int retval;

	retval = da_monitor_init_MODEL_NAME();
	if (retval)
		return retval;

TRACEPOINT_ATTACH

	return 0;
}

static void disable_MODEL_NAME(void)
{
	rv_MODEL_NAME.enabled = 0;

TRACEPOINT_DETACH

	da_monitor_destroy_MODEL_NAME();
}

/*
 * This is the monitor register section.
 */
static struct rv_monitor rv_MODEL_NAME = {
	.name = "MODEL_NAME",
	.description = "auto-generated MODEL_NAME",
	.enable = enable_MODEL_NAME,
	.disable = disable_MODEL_NAME,
	.reset = da_monitor_reset_all_MODEL_NAME,
	.enabled = 0,
};

static int __init register_MODEL_NAME(void)
{
	rv_register_monitor(&rv_MODEL_NAME);
	return 0;
}

static void __exit unregister_MODEL_NAME(void)
{
	rv_unregister_monitor(&rv_MODEL_NAME);
}

module_init(register_MODEL_NAME);
module_exit(unregister_MODEL_NAME);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("dot2k: auto-generated");
MODULE_DESCRIPTION("MODEL_NAME");
@ -1,91 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/tracepoint.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rv.h>
#include <rv/instrumentation.h>
#include <rv/da_monitor.h>

#define MODULE_NAME "MODEL_NAME"

/*
 * XXX: include required tracepoint headers, e.g.,
 * #include <linux/trace/events/sched.h>
 */
#include <trace/events/rv.h>

/*
 * This is the self-generated part of the monitor. Generally, there is no need
 * to touch this section.
 */
#include "MODEL_NAME.h"

/*
 * Declare the deterministic automata monitor.
 *
 * The rv monitor reference is needed for the monitor declaration.
 */
static struct rv_monitor rv_MODEL_NAME;
DECLARE_DA_MON_PER_TASK(MODEL_NAME, MIN_TYPE);

/*
 * This is the instrumentation part of the monitor.
 *
 * This is the section where manual work is required. Here the kernel events
 * are translated into model's event.
 *
 */
TRACEPOINT_HANDLERS_SKEL
static int enable_MODEL_NAME(void)
{
	int retval;

	retval = da_monitor_init_MODEL_NAME();
	if (retval)
		return retval;

TRACEPOINT_ATTACH

	return 0;
}

static void disable_MODEL_NAME(void)
{
	rv_MODEL_NAME.enabled = 0;

TRACEPOINT_DETACH

	da_monitor_destroy_MODEL_NAME();
}

/*
 * This is the monitor register section.
 */
static struct rv_monitor rv_MODEL_NAME = {
	.name = "MODEL_NAME",
	.description = "auto-generated MODEL_NAME",
	.enable = enable_MODEL_NAME,
	.disable = disable_MODEL_NAME,
	.reset = da_monitor_reset_all_MODEL_NAME,
	.enabled = 0,
};

static int __init register_MODEL_NAME(void)
{
	rv_register_monitor(&rv_MODEL_NAME);
	return 0;
}

static void __exit unregister_MODEL_NAME(void)
{
	rv_unregister_monitor(&rv_MODEL_NAME);
}

module_init(register_MODEL_NAME);
module_exit(unregister_MODEL_NAME);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("dot2k: auto-generated");
MODULE_DESCRIPTION("MODEL_NAME");
13
tools/verification/dot2/dot2k_templates/trace.h
Normal file
@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Snippet to be included in rv_trace.h
 */

#ifdef CONFIG_RV_MON_%%MODEL_NAME_UP%%
DEFINE_EVENT(event_%%MONITOR_CLASS%%, event_%%MODEL_NAME%%,
	     %%TRACEPOINT_ARGS_SKEL_EVENT%%);

DEFINE_EVENT(error_%%MONITOR_CLASS%%, error_%%MODEL_NAME%%,
	     %%TRACEPOINT_ARGS_SKEL_ERROR%%);
#endif /* CONFIG_RV_MON_%%MODEL_NAME_UP%% */
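Rendered for a hypothetical per-cpu monitor named "wip", the snippet would
come out roughly as below; the TP_PROTO/TP_ARGS shapes assume the
event_da_monitor/error_da_monitor event classes defined in rv_trace.h:

#ifdef CONFIG_RV_MON_WIP
DEFINE_EVENT(event_da_monitor, event_wip,
	     TP_PROTO(char *state, char *event, char *next_state, bool final_state),
	     TP_ARGS(state, event, next_state, final_state));

DEFINE_EVENT(error_da_monitor, error_wip,
	     TP_PROTO(char *state, char *event),
	     TP_ARGS(state, event));
#endif /* CONFIG_RV_MON_WIP */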