Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace.git

Stephen Rothwell, 2025-01-13 13:27:53 +11:00
commit 9977cf38a8
49 changed files with 1924 additions and 1849 deletions

View File

@@ -658,7 +658,6 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
 		       struct ftrace_ops *op, struct ftrace_regs *fregs)
 {
 	unsigned long sp = arch_ftrace_regs(fregs)->regs.gpr[1];
-	int bit;
 
 	if (unlikely(ftrace_graph_is_dead()))
 		goto out;
@@ -666,14 +665,9 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		goto out;
 
-	bit = ftrace_test_recursion_trylock(ip, parent_ip);
-	if (bit < 0)
-		goto out;
-
 	if (!function_graph_enter(parent_ip, ip, 0, (unsigned long *)sp))
 		parent_ip = ppc_function_entry(return_to_handler);
 
-	ftrace_test_recursion_unlock(bit);
 out:
 	arch_ftrace_regs(fregs)->regs.link = parent_ip;
 }

View File

@@ -790,7 +790,6 @@ static unsigned long
 __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
 {
 	unsigned long return_hooker;
-	int bit;
 
 	if (unlikely(ftrace_graph_is_dead()))
 		goto out;
@@ -798,16 +797,11 @@ __prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		goto out;
 
-	bit = ftrace_test_recursion_trylock(ip, parent);
-	if (bit < 0)
-		goto out;
-
 	return_hooker = ppc_function_entry(return_to_handler);
 
 	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
 		parent = return_hooker;
 
-	ftrace_test_recursion_unlock(bit);
 out:
 	return parent;
 }

View File

@@ -615,7 +615,6 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
 			   unsigned long frame_pointer)
 {
 	unsigned long return_hooker = (unsigned long)&return_to_handler;
-	int bit;
 
 	/*
 	 * When resuming from suspend-to-ram, this function can be indirectly
@@ -635,14 +634,8 @@ void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
-	bit = ftrace_test_recursion_trylock(ip, *parent);
-	if (bit < 0)
-		return;
-
 	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
 		*parent = return_hooker;
-
-	ftrace_test_recursion_unlock(bit);
 }
 
 #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

View File

@@ -75,6 +75,7 @@
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <linux/cleanup.h>
 
 extern bool static_key_initialized;
 
@@ -347,6 +348,8 @@ static inline void static_key_disable(struct static_key *key)
 
 #endif /* CONFIG_JUMP_LABEL */
 
+DEFINE_LOCK_GUARD_0(jump_label_lock, jump_label_lock(), jump_label_unlock())
+
 #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
 #define jump_label_enabled static_key_enabled
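
The DEFINE_LOCK_GUARD_0() above generates a zero-argument lock class, so later patches in this merge can take the jump label mutex for exactly one scope. A minimal sketch of the resulting call pattern (the function name is illustrative):

	#include <linux/jump_label.h>

	static void example_text_reserve_check(void)
	{
		guard(jump_label_lock)();	/* jump_label_lock() now, jump_label_unlock() at scope exit */

		/* ... work that must hold the jump label mutex ... */
	}	/* unlocked automatically here, on every return path */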

View File

@@ -4,6 +4,7 @@
 
 #include <linux/args.h>
 #include <linux/array_size.h>
+#include <linux/cleanup.h>	/* for DEFINE_FREE() */
 #include <linux/compiler.h>	/* for inline */
 #include <linux/types.h>	/* for size_t */
 #include <linux/stddef.h>	/* for NULL */
@@ -312,6 +313,8 @@ extern void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp);
 
 extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
 extern void argv_free(char **argv);
+DEFINE_FREE(argv_free, char **, if (!IS_ERR_OR_NULL(_T)) argv_free(_T))
+
 /* lib/cmdline.c */
 extern int get_option(char **str, int *pint);
 extern char *get_options(const char *str, int nints, int *ints);
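
With the DEFINE_FREE() hook in place, an argv_split() result can be cleaned up automatically on every exit path. A minimal sketch of the intended usage (the function name and command line are illustrative):

	#include <linux/string.h>
	#include <linux/slab.h>

	static int example_parse(const char *cmdline)
	{
		int argc;
		char **argv __free(argv_free) = argv_split(GFP_KERNEL, cmdline, &argc);

		if (!argv)
			return -ENOMEM;

		/* use argv[0] .. argv[argc - 1] here */
		return 0;	/* argv_free(argv) runs automatically on all returns */
	}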

View File

@@ -673,6 +673,20 @@ struct trace_event_file {
 	atomic_t		tm_ref;	/* trigger-mode reference counter */
 };
 
+#ifdef CONFIG_HIST_TRIGGERS
+extern struct irq_work hist_poll_work;
+extern wait_queue_head_t hist_poll_wq;
+
+static inline void hist_poll_wakeup(void)
+{
+	if (wq_has_sleeper(&hist_poll_wq))
+		irq_work_queue(&hist_poll_work);
+}
+
+#define hist_poll_wait(file, wait)	\
+	poll_wait(file, &hist_poll_wq, wait)
+#endif
+
 #define __TRACE_EVENT_FLAGS(name, value)				\
 	static int __init trace_init_flags_##name(void)		\
 	{								\
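
These helpers split the two sides of histogram polling: a producer may call hist_poll_wakeup() from any context (the irq_work defers the actual wake-up), while a consumer's poll() method registers on the shared wait queue. A sketch of the consumer side, with a hypothetical readiness check:

	#include <linux/poll.h>

	static __poll_t example_hist_poll(struct file *file, struct poll_table_struct *wait)
	{
		hist_poll_wait(file, wait);		/* register on hist_poll_wq */

		if (example_hist_has_new_data(file))	/* hypothetical readiness test */
			return EPOLLIN | EPOLLRDNORM;
		return 0;
	}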

View File

@@ -218,7 +218,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 #define __DEFINE_RUST_DO_TRACE(name, proto, args)	\
 	notrace void rust_do_trace_##name(proto)	\
 	{						\
-		__rust_do_trace_##name(args);		\
+		__do_trace_##name(args);		\
 	}
 
 /*
@@ -268,7 +268,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 #define __DECLARE_TRACE(name, proto, args, cond, data_proto)		\
 	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
-	static inline void __rust_do_trace_##name(proto)		\
+	static inline void __do_trace_##name(proto)			\
 	{								\
 		if (cond) {						\
 			guard(preempt_notrace)();			\
@@ -277,12 +277,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 	}								\
 	static inline void trace_##name(proto)				\
 	{								\
-		if (static_branch_unlikely(&__tracepoint_##name.key)) { \
-			if (cond) {					\
-				guard(preempt_notrace)();		\
-				__DO_TRACE_CALL(name, TP_ARGS(args));	\
-			}						\
-		}							\
+		if (static_branch_unlikely(&__tracepoint_##name.key))	\
+			__do_trace_##name(args);			\
 		if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) {		\
 			WARN_ONCE(!rcu_is_watching(),			\
 				  "RCU not watching for tracepoint");	\
@@ -291,7 +287,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 #define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto)		\
 	__DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \
-	static inline void __rust_do_trace_##name(proto)		\
+	static inline void __do_trace_##name(proto)			\
 	{								\
 		guard(rcu_tasks_trace)();				\
 		__DO_TRACE_CALL(name, TP_ARGS(args));			\
@@ -299,10 +295,8 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
 	static inline void trace_##name(proto)				\
 	{								\
 		might_fault();						\
-		if (static_branch_unlikely(&__tracepoint_##name.key)) { \
-			guard(rcu_tasks_trace)();			\
-			__DO_TRACE_CALL(name, TP_ARGS(args));		\
-		}							\
+		if (static_branch_unlikely(&__tracepoint_##name.key))	\
+			__do_trace_##name(args);			\
 		if (IS_ENABLED(CONFIG_LOCKDEP)) {			\
 			WARN_ONCE(!rcu_is_watching(),			\
 				  "RCU not watching for tracepoint");	\

View File

@@ -39,6 +39,7 @@
 #include <linux/static_call.h>
 #include <linux/perf_event.h>
 #include <linux/execmem.h>
+#include <linux/cleanup.h>
 
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
@@ -140,12 +141,11 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c);
 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip;
-	kprobe_opcode_t *slot = NULL;
 
 	/* Since the slot array is not protected by rcu, we need a mutex */
-	mutex_lock(&c->mutex);
- retry:
-	rcu_read_lock();
+	guard(mutex)(&c->mutex);
+	do {
+		guard(rcu)();
 	list_for_each_entry_rcu(kip, &c->pages, list) {
 		if (kip->nused < slots_per_page(c)) {
 			int i;
@@ -154,9 +154,7 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 				if (kip->slot_used[i] == SLOT_CLEAN) {
 					kip->slot_used[i] = SLOT_USED;
 					kip->nused++;
-					slot = kip->insns + (i * c->insn_size);
-					rcu_read_unlock();
-					goto out;
+					return kip->insns + (i * c->insn_size);
 				}
 			}
 			/* kip->nused is broken. Fix it. */
@@ -164,21 +162,18 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 			WARN_ON(1);
 		}
 	}
-	rcu_read_unlock();
 	/* If there are any garbage slots, collect it and try again. */
-	if (c->nr_garbage && collect_garbage_slots(c) == 0)
-		goto retry;
+	} while (c->nr_garbage && collect_garbage_slots(c) == 0);
 
 	/* All out of space.  Need to allocate a new page. */
 	kip = kmalloc(struct_size(kip, slot_used, slots_per_page(c)), GFP_KERNEL);
 	if (!kip)
-		goto out;
+		return NULL;
 
 	kip->insns = c->alloc();
 	if (!kip->insns) {
 		kfree(kip);
-		goto out;
+		return NULL;
 	}
 	INIT_LIST_HEAD(&kip->list);
 	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
@@ -187,14 +182,12 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 	kip->ngarbage = 0;
 	kip->cache = c;
 	list_add_rcu(&kip->list, &c->pages);
-	slot = kip->insns;
 
 	/* Record the perf ksymbol register event after adding the page */
 	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
 			   PAGE_SIZE, false, c->sym);
-out:
-	mutex_unlock(&c->mutex);
-	return slot;
+
+	return kip->insns;
 }
 
 /* Return true if all garbages are collected, otherwise false. */
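
The conversions throughout this file lean on the scope-based helpers from <linux/cleanup.h>: guard(type)(args) acquires a lock and releases it when the variable leaves scope, which makes every early return safe, while scoped_guard(type, args) { ... } bounds the critical section to an explicit block. A minimal sketch of both, with illustrative names:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);

	static int example_lookup(void)
	{
		guard(mutex)(&example_lock);	/* mutex_unlock() on every return below */

		scoped_guard(rcu) {		/* rcu_read_lock()/rcu_read_unlock() around block */
			/* RCU-protected traversal */
		}

		return 0;			/* no unlock boilerplate needed */
	}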
@@ -249,25 +242,35 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c)
 	return 0;
 }
 
-void __free_insn_slot(struct kprobe_insn_cache *c,
-		      kprobe_opcode_t *slot, int dirty)
+static long __find_insn_page(struct kprobe_insn_cache *c,
+			     kprobe_opcode_t *slot, struct kprobe_insn_page **pkip)
 {
-	struct kprobe_insn_page *kip;
+	struct kprobe_insn_page *kip = NULL;
 	long idx;
 
-	mutex_lock(&c->mutex);
-	rcu_read_lock();
+	guard(rcu)();
 	list_for_each_entry_rcu(kip, &c->pages, list) {
 		idx = ((long)slot - (long)kip->insns) /
 			(c->insn_size * sizeof(kprobe_opcode_t));
-		if (idx >= 0 && idx < slots_per_page(c))
-			goto out;
+		if (idx >= 0 && idx < slots_per_page(c)) {
+			*pkip = kip;
+			return idx;
+		}
 	}
 	/* Could not find this slot. */
 	WARN_ON(1);
-	kip = NULL;
-out:
-	rcu_read_unlock();
+	*pkip = NULL;
+	return -1;
+}
+
+void __free_insn_slot(struct kprobe_insn_cache *c,
+		      kprobe_opcode_t *slot, int dirty)
+{
+	struct kprobe_insn_page *kip = NULL;
+	long idx;
+
+	guard(mutex)(&c->mutex);
+	idx = __find_insn_page(c, slot, &kip);
 	/* Mark and sweep: this may sleep */
 	if (kip) {
 		/* Check double free */
@@ -281,7 +284,6 @@ out:
 			collect_one_slot(kip, idx);
 		}
 	}
-	mutex_unlock(&c->mutex);
 }
 
 /*
@@ -600,9 +602,10 @@ static void kick_kprobe_optimizer(void)
 /* Kprobe jump optimizer */
 static void kprobe_optimizer(struct work_struct *work)
 {
-	mutex_lock(&kprobe_mutex);
-	cpus_read_lock();
-	mutex_lock(&text_mutex);
+	guard(mutex)(&kprobe_mutex);
+
+	scoped_guard(cpus_read_lock) {
+		guard(mutex)(&text_mutex);
 
 	/*
 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
@@ -626,21 +629,16 @@ static void kprobe_optimizer(struct work_struct *work)
 
 	/* Step 4: Free cleaned kprobes after quiesence period */
 	do_free_cleaned_kprobes();
+	}
 
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
 
 	/* Step 5: Kick optimizer again if needed */
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 		kick_kprobe_optimizer();
-
-	mutex_unlock(&kprobe_mutex);
 }
 
-/* Wait for completing optimization and unoptimization */
-void wait_for_kprobe_optimizer(void)
+static void wait_for_kprobe_optimizer_locked(void)
 {
-	mutex_lock(&kprobe_mutex);
+	lockdep_assert_held(&kprobe_mutex);
 
 	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
 		mutex_unlock(&kprobe_mutex);
@@ -652,8 +650,14 @@ void wait_for_kprobe_optimizer(void)
 		mutex_lock(&kprobe_mutex);
 	}
+}
 
-	mutex_unlock(&kprobe_mutex);
+/* Wait for completing optimization and unoptimization */
+void wait_for_kprobe_optimizer(void)
+{
+	guard(mutex)(&kprobe_mutex);
+
+	wait_for_kprobe_optimizer_locked();
 }
 
 bool optprobe_queued_unopt(struct optimized_kprobe *op)
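
Splitting out a *_locked() variant is what lets disarm_all_kprobes() and unoptimize_all_kprobes() below wait for the optimizer while already holding kprobe_mutex through a guard(). The shape of the pattern, as a sketch with illustrative names:

	static void example_flush_locked(void)
	{
		lockdep_assert_held(&kprobe_mutex);	/* caller owns the lock */
		/* ... may drop and retake the lock internally while waiting ... */
	}

	void example_flush(void)
	{
		guard(mutex)(&kprobe_mutex);
		example_flush_locked();
	}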
@@ -852,29 +856,24 @@ static void try_to_optimize_kprobe(struct kprobe *p)
 		return;
 
 	/* For preparing optimization, jump_label_text_reserved() is called. */
-	cpus_read_lock();
-	jump_label_lock();
-	mutex_lock(&text_mutex);
+	guard(cpus_read_lock)();
+	guard(jump_label_lock)();
+	guard(mutex)(&text_mutex);
 
 	ap = alloc_aggr_kprobe(p);
 	if (!ap)
-		goto out;
+		return;
 
 	op = container_of(ap, struct optimized_kprobe, kp);
 	if (!arch_prepared_optinsn(&op->optinsn)) {
 		/* If failed to setup optimizing, fallback to kprobe. */
 		arch_remove_optimized_kprobe(op);
 		kfree(op);
-		goto out;
+		return;
 	}
 
 	init_aggr_kprobe(ap, p);
 	optimize_kprobe(ap);	/* This just kicks optimizer thread. */
-
-out:
-	mutex_unlock(&text_mutex);
-	jump_label_unlock();
-	cpus_read_unlock();
 }
static void optimize_all_kprobes(void) static void optimize_all_kprobes(void)
@ -883,10 +882,10 @@ static void optimize_all_kprobes(void)
struct kprobe *p; struct kprobe *p;
unsigned int i; unsigned int i;
mutex_lock(&kprobe_mutex); guard(mutex)(&kprobe_mutex);
/* If optimization is already allowed, just return. */ /* If optimization is already allowed, just return. */
if (kprobes_allow_optimization) if (kprobes_allow_optimization)
goto out; return;
cpus_read_lock(); cpus_read_lock();
kprobes_allow_optimization = true; kprobes_allow_optimization = true;
@ -898,8 +897,6 @@ static void optimize_all_kprobes(void)
} }
cpus_read_unlock(); cpus_read_unlock();
pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n"); pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
out:
mutex_unlock(&kprobe_mutex);
} }
#ifdef CONFIG_SYSCTL #ifdef CONFIG_SYSCTL
@@ -909,12 +906,10 @@ static void unoptimize_all_kprobes(void)
 	struct kprobe *p;
 	unsigned int i;
 
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 	/* If optimization is already prohibited, just return. */
-	if (!kprobes_allow_optimization) {
-		mutex_unlock(&kprobe_mutex);
+	if (!kprobes_allow_optimization)
 		return;
-	}
 
 	cpus_read_lock();
 	kprobes_allow_optimization = false;
@@ -926,10 +921,8 @@ static void unoptimize_all_kprobes(void)
 		}
 	}
 	cpus_read_unlock();
-	mutex_unlock(&kprobe_mutex);
 
 	/* Wait for unoptimizing completion. */
-	wait_for_kprobe_optimizer();
+	wait_for_kprobe_optimizer_locked();
 	pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
 }
@@ -941,7 +934,7 @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table,
 {
 	int ret;
 
-	mutex_lock(&kprobe_sysctl_mutex);
+	guard(mutex)(&kprobe_sysctl_mutex);
 	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
 
@@ -949,7 +942,6 @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table,
 		optimize_all_kprobes();
 	else
 		unoptimize_all_kprobes();
-	mutex_unlock(&kprobe_sysctl_mutex);
 
 	return ret;
 }
@@ -1024,7 +1016,8 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
 #define __arm_kprobe(p)			arch_arm_kprobe(p)
 #define __disarm_kprobe(p, o)		arch_disarm_kprobe(p)
 #define kprobe_disarmed(p)		kprobe_disabled(p)
-#define wait_for_kprobe_optimizer()	do {} while (0)
+#define wait_for_kprobe_optimizer_locked()	\
+	lockdep_assert_held(&kprobe_mutex)
 
 static int reuse_unused_kprobe(struct kprobe *ap)
 {
@@ -1078,14 +1071,7 @@ static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
 	if (*cnt == 0) {
 		ret = register_ftrace_function(ops);
-		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
-			goto err_ftrace;
-	}
-
-	(*cnt)++;
-	return ret;
-
-err_ftrace:
+		if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) {
 			/*
 			 * At this point, sinec ops is not registered, we should be sefe from
 			 * registering empty filter.
@@ -1093,6 +1079,11 @@ err_ftrace:
 			ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
 			return ret;
 		}
+	}
+
+	(*cnt)++;
+	return ret;
+}
 
 static int arm_kprobe_ftrace(struct kprobe *p)
 {
@@ -1163,12 +1154,9 @@ static int arm_kprobe(struct kprobe *kp)
 	if (unlikely(kprobe_ftrace(kp)))
 		return arm_kprobe_ftrace(kp);
 
-	cpus_read_lock();
-	mutex_lock(&text_mutex);
+	guard(cpus_read_lock)();
+	guard(mutex)(&text_mutex);
 	__arm_kprobe(kp);
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
-
 	return 0;
 }
@@ -1177,12 +1165,9 @@ static int disarm_kprobe(struct kprobe *kp, bool reopt)
 	if (unlikely(kprobe_ftrace(kp)))
 		return disarm_kprobe_ftrace(kp);
 
-	cpus_read_lock();
-	mutex_lock(&text_mutex);
+	guard(cpus_read_lock)();
+	guard(mutex)(&text_mutex);
 	__disarm_kprobe(kp, reopt);
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
-
 	return 0;
 }
@@ -1299,25 +1284,22 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 	int ret = 0;
 	struct kprobe *ap = orig_p;
 
-	cpus_read_lock();
+	scoped_guard(cpus_read_lock) {
 	/* For preparing optimization, jump_label_text_reserved() is called */
-	jump_label_lock();
-	mutex_lock(&text_mutex);
+	guard(jump_label_lock)();
+	guard(mutex)(&text_mutex);
 
 	if (!kprobe_aggrprobe(orig_p)) {
 		/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
 		ap = alloc_aggr_kprobe(orig_p);
-		if (!ap) {
-			ret = -ENOMEM;
-			goto out;
-		}
+		if (!ap)
+			return -ENOMEM;
 		init_aggr_kprobe(ap, orig_p);
 	} else if (kprobe_unused(ap)) {
 		/* This probe is going to die. Rescue it */
 		ret = reuse_unused_kprobe(ap);
 		if (ret)
-			goto out;
+			return ret;
 	}
 
 	if (kprobe_gone(ap)) {
@@ -1334,7 +1316,7 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 		 * free the 'ap'. It will be used next time, or
 		 * freed by unregister_kprobe().
 		 */
-		goto out;
+		return ret;
 
 	/* Prepare optimized instructions if possible. */
 	prepare_optimized_kprobe(ap);
@@ -1350,11 +1332,7 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
 	/* Copy the insn slot of 'p' to 'ap'. */
 	copy_kprobe(ap, p);
 	ret = add_new_kprobe(ap, p);
-
-out:
-	mutex_unlock(&text_mutex);
-	jump_label_unlock();
-	cpus_read_unlock();
+	}
 
 	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
@@ -1448,7 +1426,7 @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
 	     unsigned long offset, bool *on_func_entry)
 {
 	if ((symbol_name && addr) || (!symbol_name && !addr))
-		goto invalid;
+		return ERR_PTR(-EINVAL);
 
 	if (symbol_name) {
 		/*
@@ -1478,16 +1456,16 @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
 	 * at the start of the function.
 	 */
 	addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
-	if (addr)
-		return addr;
+	if (!addr)
+		return ERR_PTR(-EINVAL);
 
-invalid:
-	return ERR_PTR(-EINVAL);
+	return addr;
 }
 
 static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
 {
 	bool on_func_entry;
 	return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
 }
@@ -1505,15 +1483,15 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p)
 	if (unlikely(!ap))
 		return NULL;
 
-	if (p != ap) {
-		list_for_each_entry(list_p, &ap->list, list)
-			if (list_p == p)
-				/* kprobe p is a valid probe */
-				goto valid;
-		return NULL;
-	}
-valid:
-	return ap;
+	if (p == ap)
+		return ap;
+
+	list_for_each_entry(list_p, &ap->list, list)
+		if (list_p == p)
+			/* kprobe p is a valid probe */
+			return ap;
+
+	return NULL;
 }
 
 /*
@@ -1522,14 +1500,12 @@ valid:
  */
 static inline int warn_kprobe_rereg(struct kprobe *p)
 {
-	int ret = 0;
-
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 
 	if (WARN_ON_ONCE(__get_valid_kprobe(p)))
-		ret = -EINVAL;
-	mutex_unlock(&kprobe_mutex);
+		return -EINVAL;
 
-	return ret;
+	return 0;
 }
 
 static int check_ftrace_location(struct kprobe *p)
@@ -1565,17 +1541,23 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	ret = check_ftrace_location(p);
 	if (ret)
 		return ret;
-	jump_label_lock();
-	preempt_disable();
+
+	guard(jump_label_lock)();
 
 	/* Ensure the address is in a text area, and find a module if exists. */
 	*probed_mod = NULL;
 	if (!core_kernel_text((unsigned long) p->addr)) {
+		guard(preempt)();
 		*probed_mod = __module_text_address((unsigned long) p->addr);
-		if (!(*probed_mod)) {
-			ret = -EINVAL;
-			goto out;
-		}
+		if (!(*probed_mod))
+			return -EINVAL;
+
+		/*
+		 * We must hold a refcount of the probed module while updating
+		 * its code to prohibit unexpected unloading.
+		 */
+		if (unlikely(!try_module_get(*probed_mod)))
+			return -ENOENT;
 	}
+
 	/* Ensure it is not in reserved area. */
 	if (in_gate_area_no_mm((unsigned long) p->addr) ||
@@ -1584,21 +1566,12 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	    static_call_text_reserved(p->addr, p->addr) ||
 	    find_bug((unsigned long)p->addr) ||
 	    is_cfi_preamble_symbol((unsigned long)p->addr)) {
-		ret = -EINVAL;
-		goto out;
+		module_put(*probed_mod);
+		return -EINVAL;
 	}
 
 	/* Get module refcount and reject __init functions for loaded modules. */
 	if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
-		/*
-		 * We must hold a refcount of the probed module while updating
-		 * its code to prohibit unexpected unloading.
-		 */
-		if (unlikely(!try_module_get(*probed_mod))) {
-			ret = -ENOENT;
-			goto out;
-		}
-
 		/*
 		 * If the module freed '.init.text', we couldn't insert
 		 * kprobes in there.
@@ -1606,27 +1579,58 @@ static int check_kprobe_address_safe(struct kprobe *p,
 		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
 		    !module_is_coming(*probed_mod)) {
 			module_put(*probed_mod);
-			*probed_mod = NULL;
-			ret = -ENOENT;
+			return -ENOENT;
 		}
 	}
 
-out:
-	preempt_enable();
-	jump_label_unlock();
-	return ret;
+	return 0;
+}
+
+static int __register_kprobe(struct kprobe *p)
+{
+	int ret;
+	struct kprobe *old_p;
+
+	guard(mutex)(&kprobe_mutex);
+
+	old_p = get_kprobe(p->addr);
+	if (old_p)
+		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
+		return register_aggr_kprobe(old_p, p);
+
+	scoped_guard(cpus_read_lock) {
+		/* Prevent text modification */
+		guard(mutex)(&text_mutex);
+		ret = prepare_kprobe(p);
+		if (ret)
+			return ret;
+	}
+
+	INIT_HLIST_NODE(&p->hlist);
+	hlist_add_head_rcu(&p->hlist,
+			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
+
+	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
+		ret = arm_kprobe(p);
+		if (ret) {
+			hlist_del_rcu(&p->hlist);
+			synchronize_rcu();
+			return ret;
+		}
+	}
+
+	/* Try to optimize kprobe */
+	try_to_optimize_kprobe(p);
+	return 0;
+}
 int register_kprobe(struct kprobe *p)
 {
 	int ret;
-	struct kprobe *old_p;
 	struct module *probed_mod;
 	kprobe_opcode_t *addr;
 	bool on_func_entry;
 
-	/* Adjust probe address from symbol */
+	/* Canonicalize probe address from symbol */
 	addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
 	if (IS_ERR(addr))
 		return PTR_ERR(addr);
@@ -1638,6 +1642,8 @@ int register_kprobe(struct kprobe *p)
 
 	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
 	p->flags &= KPROBE_FLAG_DISABLED;
+	if (on_func_entry)
+		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
 	p->nmissed = 0;
 	INIT_LIST_HEAD(&p->list);
@@ -1645,44 +1651,7 @@ int register_kprobe(struct kprobe *p)
 	if (ret)
 		return ret;
 
-	mutex_lock(&kprobe_mutex);
-
-	if (on_func_entry)
-		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
-
-	old_p = get_kprobe(p->addr);
-	if (old_p) {
-		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
-		ret = register_aggr_kprobe(old_p, p);
-		goto out;
-	}
-
-	cpus_read_lock();
-	/* Prevent text modification */
-	mutex_lock(&text_mutex);
-	ret = prepare_kprobe(p);
-	mutex_unlock(&text_mutex);
-	cpus_read_unlock();
-	if (ret)
-		goto out;
-
-	INIT_HLIST_NODE(&p->hlist);
-	hlist_add_head_rcu(&p->hlist,
-			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
-
-	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
-		ret = arm_kprobe(p);
-		if (ret) {
-			hlist_del_rcu(&p->hlist);
-			synchronize_rcu();
-			goto out;
-		}
-	}
-
-	/* Try to optimize kprobe */
-	try_to_optimize_kprobe(p);
-
-out:
-	mutex_unlock(&kprobe_mutex);
+	ret = __register_kprobe(p);
 
 	if (probed_mod)
 		module_put(probed_mod);
@@ -1761,29 +1730,31 @@ static int __unregister_kprobe_top(struct kprobe *p)
 	if (IS_ERR(ap))
 		return PTR_ERR(ap);
 
-	if (ap == p)
+	WARN_ON(ap != p && !kprobe_aggrprobe(ap));
+
 	/*
-	 * This probe is an independent(and non-optimized) kprobe
-	 * (not an aggrprobe). Remove from the hash list.
+	 * If the probe is an independent(and non-optimized) kprobe
+	 * (not an aggrprobe), the last kprobe on the aggrprobe, or
+	 * kprobe is already disarmed, just remove from the hash list.
 	 */
-		goto disarmed;
-
-	/* Following process expects this probe is an aggrprobe */
-	WARN_ON(!kprobe_aggrprobe(ap));
-
-	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
+	if (ap == p ||
+	    (list_is_singular(&ap->list) && kprobe_disarmed(ap))) {
 		/*
 		 * !disarmed could be happen if the probe is under delayed
 		 * unoptimizing.
 		 */
-		goto disarmed;
-	else {
+		hlist_del_rcu(&ap->hlist);
+		return 0;
+	}
+
 	/* If disabling probe has special handlers, update aggrprobe */
 	if (p->post_handler && !kprobe_gone(p)) {
 		list_for_each_entry(list_p, &ap->list, list) {
 			if ((list_p != p) && (list_p->post_handler))
-				goto noclean;
+				break;
 		}
+
+		/* No other probe has post_handler */
+		if (list_entry_is_head(list_p, &ap->list, list)) {
 			/*
 			 * For the kprobe-on-ftrace case, we keep the
 			 * post_handler setting to identify this aggrprobe
@@ -1792,7 +1763,8 @@ static int __unregister_kprobe_top(struct kprobe *p)
 			if (!kprobe_ftrace(ap))
 				ap->post_handler = NULL;
 		}
-noclean:
+	}
+
 	/*
 	 * Remove from the aggrprobe: this path will do nothing in
 	 * __unregister_kprobe_bottom().
@@ -1804,12 +1776,8 @@ noclean:
 	 * handler may have been changed.
 	 */
 	optimize_kprobe(ap);
-	}
+
 	return 0;
-
-disarmed:
-	hlist_del_rcu(&ap->hlist);
-	return 0;
 }
 
 static void __unregister_kprobe_bottom(struct kprobe *p)
@@ -1858,12 +1826,11 @@ void unregister_kprobes(struct kprobe **kps, int num)
 	if (num <= 0)
 		return;
 
-	mutex_lock(&kprobe_mutex);
+	scoped_guard(mutex, &kprobe_mutex) {
 		for (i = 0; i < num; i++)
 			if (__unregister_kprobe_top(kps[i]) < 0)
 				kps[i]->addr = NULL;
-	mutex_unlock(&kprobe_mutex);
-
+	}
 	synchronize_rcu();
 	for (i = 0; i < num; i++)
 		if (kps[i]->addr)
@@ -2302,8 +2269,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
 	if (num <= 0)
 		return;
-	mutex_lock(&kprobe_mutex);
+
 	for (i = 0; i < num; i++) {
+		guard(mutex)(&kprobe_mutex);
+
 		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
 			rps[i]->kp.addr = NULL;
 #ifdef CONFIG_KRETPROBE_ON_RETHOOK
@@ -2312,7 +2280,6 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
 		rcu_assign_pointer(rps[i]->rph->rp, NULL);
 #endif
 	}
-	mutex_unlock(&kprobe_mutex);
 
 	synchronize_rcu();
 	for (i = 0; i < num; i++) {
@@ -2393,18 +2360,14 @@ static void kill_kprobe(struct kprobe *p)
 /* Disable one kprobe */
 int disable_kprobe(struct kprobe *kp)
 {
-	int ret = 0;
 	struct kprobe *p;
 
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 
 	/* Disable this kprobe */
 	p = __disable_kprobe(kp);
-	if (IS_ERR(p))
-		ret = PTR_ERR(p);
 
-	mutex_unlock(&kprobe_mutex);
-	return ret;
+	return IS_ERR(p) ? PTR_ERR(p) : 0;
 }
 EXPORT_SYMBOL_GPL(disable_kprobe);
@@ -2414,20 +2377,16 @@ int enable_kprobe(struct kprobe *kp)
 	int ret = 0;
 	struct kprobe *p;
 
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 
 	/* Check whether specified probe is valid. */
 	p = __get_valid_kprobe(kp);
-	if (unlikely(p == NULL)) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (unlikely(p == NULL))
+		return -EINVAL;
 
-	if (kprobe_gone(kp)) {
+	if (kprobe_gone(kp))
 		/* This kprobe has gone, we couldn't enable it. */
-		ret = -EINVAL;
-		goto out;
-	}
+		return -EINVAL;
 
 	if (p != kp)
 		kp->flags &= ~KPROBE_FLAG_DISABLED;
@@ -2441,8 +2400,6 @@ int enable_kprobe(struct kprobe *kp)
 			kp->flags |= KPROBE_FLAG_DISABLED;
 		}
 	}
-out:
-	mutex_unlock(&kprobe_mutex);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(enable_kprobe);
@@ -2630,11 +2587,11 @@ static int kprobes_module_callback(struct notifier_block *nb,
 	unsigned int i;
 	int checkcore = (val == MODULE_STATE_GOING);
 
-	if (val == MODULE_STATE_COMING) {
-		mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
+
+	if (val == MODULE_STATE_COMING)
 		add_module_kprobe_blacklist(mod);
-		mutex_unlock(&kprobe_mutex);
-	}
+
 	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
 		return NOTIFY_DONE;
 
@@ -2644,7 +2601,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
 	 * notified, only '.init.text' section would be freed. We need to
 	 * disable kprobes which have been inserted in the sections.
 	 */
-	mutex_lock(&kprobe_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry(p, head, hlist)
@@ -2667,7 +2623,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
 	}
 	if (val == MODULE_STATE_GOING)
 		remove_module_kprobe_blacklist(mod);
-	mutex_unlock(&kprobe_mutex);
 	return NOTIFY_DONE;
 }
@@ -2695,7 +2650,7 @@ void kprobe_free_init_mem(void)
 	struct kprobe *p;
 	int i;
 
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 
 	/* Kill all kprobes on initmem because the target code has been freed. */
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -2705,8 +2660,6 @@ void kprobe_free_init_mem(void)
 			kill_kprobe(p);
 		}
 	}
-
-	mutex_unlock(&kprobe_mutex);
 }
 
 static int __init init_kprobes(void)
@@ -2902,11 +2855,11 @@ static int arm_all_kprobes(void)
 	unsigned int i, total = 0, errors = 0;
 	int err, ret = 0;
 
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 
 	/* If kprobes are armed, just return */
 	if (!kprobes_all_disarmed)
-		goto already_enabled;
+		return 0;
 
 	/*
 	 * optimize_kprobe() called by arm_kprobe() checks
@@ -2936,8 +2889,6 @@ static int arm_all_kprobes(void)
 	else
 		pr_info("Kprobes globally enabled\n");
 
-already_enabled:
-	mutex_unlock(&kprobe_mutex);
 	return ret;
 }
@@ -2948,13 +2899,11 @@ static int disarm_all_kprobes(void)
 	unsigned int i, total = 0, errors = 0;
 	int err, ret = 0;
 
-	mutex_lock(&kprobe_mutex);
+	guard(mutex)(&kprobe_mutex);
 
 	/* If kprobes are already disarmed, just return */
-	if (kprobes_all_disarmed) {
-		mutex_unlock(&kprobe_mutex);
+	if (kprobes_all_disarmed)
 		return 0;
-	}
 
 	kprobes_all_disarmed = true;
 
@@ -2979,11 +2928,8 @@ static int disarm_all_kprobes(void)
 	else
 		pr_info("Kprobes globally disabled\n");
 
-	mutex_unlock(&kprobe_mutex);
-
 	/* Wait for disarming all kprobes by optimizer */
-	wait_for_kprobe_optimizer();
+	wait_for_kprobe_optimizer_locked();
 
 	return ret;
 }

View File

@@ -650,8 +650,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
 	struct ftrace_graph_ent trace;
 	unsigned long bitmap = 0;
 	int offset;
+	int bit;
 	int i;
 
+	bit = ftrace_test_recursion_trylock(func, ret);
+	if (bit < 0)
+		return -EBUSY;
+
 	trace.func = func;
 	trace.depth = ++current->curr_ret_depth;
@@ -697,12 +702,13 @@ int function_graph_enter(unsigned long ret, unsigned long func,
 	 * flag, set that bit always.
 	 */
 	set_bitmap(current, offset, bitmap | BIT(0));
+	ftrace_test_recursion_unlock(bit);
 	return 0;
 out_ret:
 	current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
 out:
 	current->curr_ret_depth--;
+	ftrace_test_recursion_unlock(bit);
 	return -EBUSY;
 }
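
With this hunk, the recursion protection that each architecture used to open-code (see the powerpc and x86 removals earlier in this merge) lives once in the core: function_graph_enter() takes the ftrace recursion lock itself and returns -EBUSY when re-entered. For reference, the trylock/unlock pair works like this, in a sketch whose wrapper function is illustrative:

	#include <linux/trace_recursion.h>

	static int example_entry(unsigned long func, unsigned long ret)
	{
		int bit;

		bit = ftrace_test_recursion_trylock(func, ret);
		if (bit < 0)			/* already tracing in this context: bail out */
			return -EBUSY;

		/* ... record the function entry ... */

		ftrace_test_recursion_unlock(bit);
		return 0;
	}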

View File

@@ -536,24 +536,21 @@ static int function_stat_show(struct seq_file *m, void *v)
 {
 	struct ftrace_profile *rec = v;
 	char str[KSYM_SYMBOL_LEN];
-	int ret = 0;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	static struct trace_seq s;
 	unsigned long long avg;
 	unsigned long long stddev;
 #endif
-	mutex_lock(&ftrace_profile_lock);
+	guard(mutex)(&ftrace_profile_lock);
 
 	/* we raced with function_profile_reset() */
-	if (unlikely(rec->counter == 0)) {
-		ret = -EBUSY;
-		goto out;
-	}
+	if (unlikely(rec->counter == 0))
		return -EBUSY;
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	avg = div64_ul(rec->time, rec->counter);
 	if (tracing_thresh && (avg < tracing_thresh))
-		goto out;
+		return 0;
 #endif
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
@@ -590,10 +587,8 @@ static int function_stat_show(struct seq_file *m, void *v)
 	trace_print_seq(m, &s);
 #endif
 	seq_putc(m, '\n');
-out:
-	mutex_unlock(&ftrace_profile_lock);
 
-	return ret;
+	return 0;
 }
 
 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -789,27 +784,24 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
 {
 	struct ftrace_profile_stat *stat;
 	struct ftrace_profile *rec;
-	unsigned long flags;
 
 	if (!ftrace_profile_enabled)
 		return;
 
-	local_irq_save(flags);
+	guard(preempt_notrace)();
 
 	stat = this_cpu_ptr(&ftrace_profile_stats);
 	if (!stat->hash || !ftrace_profile_enabled)
-		goto out;
+		return;
 
 	rec = ftrace_find_profiled_func(stat, ip);
 	if (!rec) {
 		rec = ftrace_profile_alloc(stat, ip);
 		if (!rec)
-			goto out;
+			return;
 	}
 
 	rec->counter++;
-out:
-	local_irq_restore(flags);
 }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
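
The switch from local_irq_save() to guard(preempt_notrace)() here rests on a narrower requirement: the profiler only needs this_cpu_ptr() to stay on one CPU, so disabling preemption (the notrace variant avoids recursing into the tracer) is enough, and interrupts need not be masked. A minimal sketch of the idiom, with an illustrative per-CPU counter:

	#include <linux/cleanup.h>
	#include <linux/percpu.h>
	#include <linux/preempt.h>

	static DEFINE_PER_CPU(unsigned long, example_counter);

	static void example_percpu_update(void)
	{
		guard(preempt_notrace)();	/* pin to this CPU without masking IRQs */

		this_cpu_inc(example_counter);	/* safe: no migration until scope exit */
	}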
@@ -856,19 +848,19 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
 	unsigned long long calltime;
 	unsigned long long rettime = trace_clock_local();
 	struct ftrace_profile *rec;
-	unsigned long flags;
 	int size;
 
-	local_irq_save(flags);
+	guard(preempt_notrace)();
+
 	stat = this_cpu_ptr(&ftrace_profile_stats);
 	if (!stat->hash || !ftrace_profile_enabled)
-		goto out;
+		return;
 
 	profile_data = fgraph_retrieve_data(gops->idx, &size);
 
 	/* If the calltime was zero'd ignore it */
 	if (!profile_data || !profile_data->calltime)
-		goto out;
+		return;
 
 	calltime = rettime - profile_data->calltime;
@@ -896,9 +888,6 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
 		rec->time += calltime;
 		rec->time_squared += calltime * calltime;
 	}
-
-out:
-	local_irq_restore(flags);
 }
 
 static struct fgraph_ops fprofiler_ops = {
@@ -946,20 +935,16 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
 	val = !!val;
 
-	mutex_lock(&ftrace_profile_lock);
+	guard(mutex)(&ftrace_profile_lock);
 	if (ftrace_profile_enabled ^ val) {
 		if (val) {
 			ret = ftrace_profile_init();
-			if (ret < 0) {
-				cnt = ret;
-				goto out;
-			}
+			if (ret < 0)
+				return ret;
 
 			ret = register_ftrace_profiler();
-			if (ret < 0) {
-				cnt = ret;
-				goto out;
-			}
+			if (ret < 0)
+				return ret;
 			ftrace_profile_enabled = 1;
 		} else {
 			ftrace_profile_enabled = 0;
@@ -970,8 +955,6 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
 			unregister_ftrace_profiler();
 		}
 	}
-out:
-	mutex_unlock(&ftrace_profile_lock);
 
 	*ppos += cnt;
@@ -1671,14 +1654,12 @@ unsigned long ftrace_location(unsigned long ip)
 	loc = ftrace_location_range(ip, ip);
 	if (!loc) {
 		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
-			goto out;
+			return 0;
 
 		/* map sym+0 to __fentry__ */
 		if (!offset)
 			loc = ftrace_location_range(ip, ip + size - 1);
 	}
 
-out:
 	return loc;
 }
@@ -2073,7 +2054,7 @@ rollback:
 			continue;
 
 		if (rec == end)
-			goto err_out;
+			return -EBUSY;
 
 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
@@ -2086,7 +2067,6 @@ rollback:
 			rec->flags |= FTRACE_FL_IPMODIFY;
 	} while_for_each_ftrace_rec();
 
-err_out:
 	return -EBUSY;
 }
@@ -5615,20 +5595,15 @@ static DEFINE_MUTEX(ftrace_cmd_mutex);
 __init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
 	struct ftrace_func_command *p;
-	int ret = 0;
 
-	mutex_lock(&ftrace_cmd_mutex);
+	guard(mutex)(&ftrace_cmd_mutex);
 	list_for_each_entry(p, &ftrace_commands, list) {
-		if (strcmp(cmd->name, p->name) == 0) {
-			ret = -EBUSY;
-			goto out_unlock;
-		}
+		if (strcmp(cmd->name, p->name) == 0)
+			return -EBUSY;
 	}
 	list_add(&cmd->list, &ftrace_commands);
-out_unlock:
-	mutex_unlock(&ftrace_cmd_mutex);
 
-	return ret;
+	return 0;
 }
 
 /*
@@ -5638,20 +5613,17 @@ __init int register_ftrace_command(struct ftrace_func_command *cmd)
 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
 {
 	struct ftrace_func_command *p, *n;
-	int ret = -ENODEV;
 
-	mutex_lock(&ftrace_cmd_mutex);
+	guard(mutex)(&ftrace_cmd_mutex);
 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
 		if (strcmp(cmd->name, p->name) == 0) {
-			ret = 0;
 			list_del_init(&p->list);
-			goto out_unlock;
+			return 0;
 		}
 	}
-out_unlock:
-	mutex_unlock(&ftrace_cmd_mutex);
 
-	return ret;
+	return -ENODEV;
 }
 static int ftrace_process_regex(struct ftrace_iterator *iter,
@@ -5661,7 +5633,7 @@ static int ftrace_process_regex(struct ftrace_iterator *iter,
 	struct trace_array *tr = iter->ops->private;
 	char *func, *command, *next = buff;
 	struct ftrace_func_command *p;
-	int ret = -EINVAL;
+	int ret;
 
 	func = strsep(&next, ":");
 
@@ -5678,17 +5650,14 @@ static int ftrace_process_regex(struct ftrace_iterator *iter,
 
 	command = strsep(&next, ":");
 
-	mutex_lock(&ftrace_cmd_mutex);
-	list_for_each_entry(p, &ftrace_commands, list) {
-		if (strcmp(p->name, command) == 0) {
-			ret = p->func(tr, hash, func, command, next, enable);
-			goto out_unlock;
-		}
-	}
-out_unlock:
-	mutex_unlock(&ftrace_cmd_mutex);
+	guard(mutex)(&ftrace_cmd_mutex);
 
-	return ret;
+	list_for_each_entry(p, &ftrace_commands, list) {
+		if (strcmp(p->name, command) == 0)
+			return p->func(tr, hash, func, command, next, enable);
+	}
+
+	return -EINVAL;
 }
static ssize_t static ssize_t
@ -5722,12 +5691,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
parser->idx, enable); parser->idx, enable);
trace_parser_clear(parser); trace_parser_clear(parser);
if (ret < 0) if (ret < 0)
goto out; return ret;
} }
ret = read; return read;
out:
return ret;
} }
ssize_t ssize_t
@@ -8287,7 +8254,7 @@ pid_write(struct file *filp, const char __user *ubuf,
 	if (!cnt)
 		return 0;
 
-	mutex_lock(&ftrace_lock);
+	guard(mutex)(&ftrace_lock);
 
 	switch (type) {
 	case TRACE_PIDS:
@@ -8303,14 +8270,13 @@ pid_write(struct file *filp, const char __user *ubuf,
 				     lockdep_is_held(&ftrace_lock));
 		break;
 	default:
-		ret = -EINVAL;
 		WARN_ON_ONCE(1);
-		goto out;
+		return -EINVAL;
 	}
 
 	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
 	if (ret < 0)
-		goto out;
+		return ret;
 
 	switch (type) {
 	case TRACE_PIDS:
@@ -8339,10 +8305,7 @@ pid_write(struct file *filp, const char __user *ubuf,
 	ftrace_update_pid_func();
 	ftrace_startup_all(0);
-out:
-	mutex_unlock(&ftrace_lock);
 
-	if (ret > 0)
-		*ppos += ret;
+	*ppos += ret;
 
 	return ret;
@@ -8746,17 +8709,17 @@ static int
 ftrace_enable_sysctl(const struct ctl_table *table, int write,
 		     void *buffer, size_t *lenp, loff_t *ppos)
 {
-	int ret = -ENODEV;
+	int ret;
 
-	mutex_lock(&ftrace_lock);
+	guard(mutex)(&ftrace_lock);
 
 	if (unlikely(ftrace_disabled))
-		goto out;
+		return -ENODEV;
 
 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
-		goto out;
+		return ret;
 
 	if (ftrace_enabled) {
@@ -8770,8 +8733,7 @@ ftrace_enable_sysctl(const struct ctl_table *table, int write,
 	} else {
 		if (is_permanent_ops_registered()) {
 			ftrace_enabled = true;
-			ret = -EBUSY;
-			goto out;
+			return -EBUSY;
 		}
 
 		/* stopping ftrace calls (just send to ftrace_stub) */
@@ -8781,9 +8743,7 @@ ftrace_enable_sysctl(const struct ctl_table *table, int write,
 	}
 	last_ftrace_enabled = !!ftrace_enabled;
 
-out:
-	mutex_unlock(&ftrace_lock);
-	return ret;
+	return 0;
 }
 
 static struct ctl_table ftrace_sysctls[] = {

View File

@@ -25,30 +25,9 @@ menuconfig RV
 	  For further information, see:
 	    Documentation/trace/rv/runtime-verification.rst
 
-config RV_MON_WIP
-	depends on RV
-	depends on PREEMPT_TRACER
-	select DA_MON_EVENTS_IMPLICIT
-	bool "wip monitor"
-	help
-	  Enable wip (wakeup in preemptive) sample monitor that illustrates
-	  the usage of per-cpu monitors, and one limitation of the
-	  preempt_disable/enable events.
-
-	  For further information, see:
-	    Documentation/trace/rv/monitor_wip.rst
-
-config RV_MON_WWNR
-	depends on RV
-	select DA_MON_EVENTS_ID
-	bool "wwnr monitor"
-	help
-	  Enable wwnr (wakeup while not running) sample monitor, this is a
-	  sample monitor that illustrates the usage of per-task monitor.
-	  The model is borken on purpose: it serves to test reactors.
-
-	  For further information, see:
-	    Documentation/trace/rv/monitor_wwnr.rst
+source "kernel/trace/rv/monitors/wip/Kconfig"
+source "kernel/trace/rv/monitors/wwnr/Kconfig"
+# Add new monitors here
 
 config RV_REACTORS
 	bool "Runtime verification reactors"

View File

@@ -1,8 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0
+ccflags-y += -I $(src)		# needed for trace events
+
 obj-$(CONFIG_RV) += rv.o
 obj-$(CONFIG_RV_MON_WIP) += monitors/wip/wip.o
 obj-$(CONFIG_RV_MON_WWNR) += monitors/wwnr/wwnr.o
+# Add new monitors here
 obj-$(CONFIG_RV_REACTORS) += rv_reactors.o
 obj-$(CONFIG_RV_REACT_PRINTK) += reactor_printk.o
 obj-$(CONFIG_RV_REACT_PANIC) += reactor_panic.o

View File

@@ -0,0 +1,12 @@
+config RV_MON_WIP
+	depends on RV
+	depends on PREEMPT_TRACER
+	select DA_MON_EVENTS_IMPLICIT
+	bool "wip monitor"
+	help
+	  Enable wip (wakeup in preemptive) sample monitor that illustrates
+	  the usage of per-cpu monitors, and one limitation of the
+	  preempt_disable/enable events.
+
+	  For further information, see:
+	    Documentation/trace/rv/monitor_wip.rst

View File

@@ -10,7 +10,7 @@
 
 #define MODULE_NAME "wip"
 
-#include <trace/events/rv.h>
+#include <rv_trace.h>
 #include <trace/events/sched.h>
 #include <trace/events/preemptirq.h>

View File

@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Snippet to be included in rv_trace.h
+ */
+
+#ifdef CONFIG_RV_MON_WIP
+DEFINE_EVENT(event_da_monitor, event_wip,
+	     TP_PROTO(char *state, char *event, char *next_state, bool final_state),
+	     TP_ARGS(state, event, next_state, final_state));
+
+DEFINE_EVENT(error_da_monitor, error_wip,
+	     TP_PROTO(char *state, char *event),
+	     TP_ARGS(state, event));
+#endif /* CONFIG_RV_MON_WIP */

View File

@@ -0,0 +1,11 @@
+config RV_MON_WWNR
+	depends on RV
+	select DA_MON_EVENTS_ID
+	bool "wwnr monitor"
+	help
+	  Enable wwnr (wakeup while not running) sample monitor, this is a
+	  sample monitor that illustrates the usage of per-task monitor.
+	  The model is borken on purpose: it serves to test reactors.
+
+	  For further information, see:
+	    Documentation/trace/rv/monitor_wwnr.rst

View File

@@ -10,7 +10,7 @@
 
 #define MODULE_NAME "wwnr"
 
-#include <trace/events/rv.h>
+#include <rv_trace.h>
 #include <trace/events/sched.h>
 
 #include "wwnr.h"

View File

@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Snippet to be included in rv_trace.h
+ */
+
+#ifdef CONFIG_RV_MON_WWNR
+/* id is the pid of the task */
+DEFINE_EVENT(event_da_monitor_id, event_wwnr,
+	     TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
+	     TP_ARGS(id, state, event, next_state, final_state));
+
+DEFINE_EVENT(error_da_monitor_id, error_wwnr,
+	     TP_PROTO(int id, char *state, char *event),
+	     TP_ARGS(id, state, event));
+#endif /* CONFIG_RV_MON_WWNR */

View File

@@ -145,7 +145,7 @@
 
 #ifdef CONFIG_DA_MON_EVENTS
 #define CREATE_TRACE_POINTS
-#include <trace/events/rv.h>
+#include <rv_trace.h>
 #endif
 
 #include "rv.h"

View File

@ -57,15 +57,9 @@ DECLARE_EVENT_CLASS(error_da_monitor,
__entry->state) __entry->state)
); );
#ifdef CONFIG_RV_MON_WIP #include <monitors/wip/wip_trace.h>
DEFINE_EVENT(event_da_monitor, event_wip, // Add new monitors based on CONFIG_DA_MON_EVENTS_IMPLICIT here
TP_PROTO(char *state, char *event, char *next_state, bool final_state),
TP_ARGS(state, event, next_state, final_state));
DEFINE_EVENT(error_da_monitor, error_wip,
TP_PROTO(char *state, char *event),
TP_ARGS(state, event));
#endif /* CONFIG_RV_MON_WIP */
#endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */ #endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */
#ifdef CONFIG_DA_MON_EVENTS_ID #ifdef CONFIG_DA_MON_EVENTS_ID
@ -123,20 +117,14 @@ DECLARE_EVENT_CLASS(error_da_monitor_id,
__entry->state) __entry->state)
); );
#ifdef CONFIG_RV_MON_WWNR #include <monitors/wwnr/wwnr_trace.h>
/* id is the pid of the task */ // Add new monitors based on CONFIG_DA_MON_EVENTS_ID here
DEFINE_EVENT(event_da_monitor_id, event_wwnr,
TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state),
TP_ARGS(id, state, event, next_state, final_state));
DEFINE_EVENT(error_da_monitor_id, error_wwnr,
TP_PROTO(int id, char *state, char *event),
TP_ARGS(id, state, event));
#endif /* CONFIG_RV_MON_WWNR */
#endif /* CONFIG_DA_MON_EVENTS_ID */ #endif /* CONFIG_DA_MON_EVENTS_ID */
#endif /* _TRACE_RV_H */ #endif /* _TRACE_RV_H */
/* This part must be outside protection */ /* This part must be outside protection */
#undef TRACE_INCLUDE_PATH #undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE rv_trace
#include <trace/define_trace.h> #include <trace/define_trace.h>

View File

@ -26,6 +26,7 @@
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/cleanup.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/ftrace.h> #include <linux/ftrace.h>
#include <linux/module.h> #include <linux/module.h>
@ -535,19 +536,16 @@ LIST_HEAD(ftrace_trace_arrays);
int trace_array_get(struct trace_array *this_tr) int trace_array_get(struct trace_array *this_tr)
{ {
struct trace_array *tr; struct trace_array *tr;
int ret = -ENODEV;
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) { list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (tr == this_tr) { if (tr == this_tr) {
tr->ref++; tr->ref++;
ret = 0; return 0;
break;
} }
} }
mutex_unlock(&trace_types_lock);
return ret; return -ENODEV;
} }
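These trace.c conversions rely on the scope-based cleanup helpers from <linux/cleanup.h> (hence the new include above): guard(mutex)(&lock) acquires the mutex and, via the compiler's cleanup attribute, releases it automatically on every exit from the enclosing scope, which is what allows the early returns to replace the goto/unlock ladders. A minimal sketch of the shape, with a lock and value invented for illustration:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);		/* hypothetical lock */
	static int demo_value;

	static int demo_lookup(int key)
	{
		guard(mutex)(&demo_lock);	/* dropped automatically at any return */

		if (key < 0)
			return -EINVAL;		/* no explicit unlock on the error path */

		return demo_value;		/* nor on the success path */
	}

The trade-off is that the critical section now extends to the end of the scope, so the pattern fits functions such as trace_array_get() above, which held the lock until the final return anyway.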
static void __trace_array_put(struct trace_array *this_tr) static void __trace_array_put(struct trace_array *this_tr)
@ -1443,22 +1441,20 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
cond_update_fn_t update) cond_update_fn_t update)
{ {
struct cond_snapshot *cond_snapshot; struct cond_snapshot *cond_snapshot __free(kfree) =
int ret = 0; kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
int ret;
cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
if (!cond_snapshot) if (!cond_snapshot)
return -ENOMEM; return -ENOMEM;
cond_snapshot->cond_data = cond_data; cond_snapshot->cond_data = cond_data;
cond_snapshot->update = update; cond_snapshot->update = update;
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
if (tr->current_trace->use_max_tr) { if (tr->current_trace->use_max_tr)
ret = -EBUSY; return -EBUSY;
goto fail_unlock;
}
/* /*
* The cond_snapshot can only change to NULL without the * The cond_snapshot can only change to NULL without the
@ -1468,29 +1464,20 @@ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
* do safely with only holding the trace_types_lock and not * do safely with only holding the trace_types_lock and not
* having to take the max_lock. * having to take the max_lock.
*/ */
if (tr->cond_snapshot) { if (tr->cond_snapshot)
ret = -EBUSY; return -EBUSY;
goto fail_unlock;
}
ret = tracing_arm_snapshot_locked(tr); ret = tracing_arm_snapshot_locked(tr);
if (ret) if (ret)
goto fail_unlock; return ret;
local_irq_disable(); local_irq_disable();
arch_spin_lock(&tr->max_lock); arch_spin_lock(&tr->max_lock);
tr->cond_snapshot = cond_snapshot; tr->cond_snapshot = no_free_ptr(cond_snapshot);
arch_spin_unlock(&tr->max_lock); arch_spin_unlock(&tr->max_lock);
local_irq_enable(); local_irq_enable();
mutex_unlock(&trace_types_lock); return 0;
return ret;
fail_unlock:
mutex_unlock(&trace_types_lock);
kfree(cond_snapshot);
return ret;
} }
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
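tracing_snapshot_cond_enable() combines two further cleanup.h idioms: __free(kfree) ties the allocation's lifetime to the variable's scope, and no_free_ptr() disarms that cleanup at the single point where ownership is transferred. A hedged sketch with invented names:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	struct demo_obj { int data; };
	static struct demo_obj *demo_registered;	/* hypothetical consumer */

	static int demo_install(int data)
	{
		struct demo_obj *obj __free(kfree) =
			kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return -ENOMEM;

		if (data < 0)
			return -EINVAL;			/* obj is kfree()d automatically */

		obj->data = data;
		demo_registered = no_free_ptr(obj);	/* hand off; cleanup disarmed */
		return 0;
	}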
@ -2203,10 +2190,10 @@ static __init int init_trace_selftests(void)
selftests_can_run = true; selftests_can_run = true;
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
if (list_empty(&postponed_selftests)) if (list_empty(&postponed_selftests))
goto out; return 0;
pr_info("Running postponed tracer tests:\n"); pr_info("Running postponed tracer tests:\n");
@ -2235,9 +2222,6 @@ static __init int init_trace_selftests(void)
} }
tracing_selftest_running = false; tracing_selftest_running = false;
out:
mutex_unlock(&trace_types_lock);
return 0; return 0;
} }
core_initcall(init_trace_selftests); core_initcall(init_trace_selftests);
@ -2807,7 +2791,7 @@ int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
int save_tracepoint_printk; int save_tracepoint_printk;
int ret; int ret;
mutex_lock(&tracepoint_printk_mutex); guard(mutex)(&tracepoint_printk_mutex);
save_tracepoint_printk = tracepoint_printk; save_tracepoint_printk = tracepoint_printk;
ret = proc_dointvec(table, write, buffer, lenp, ppos); ret = proc_dointvec(table, write, buffer, lenp, ppos);
@ -2820,16 +2804,13 @@ int tracepoint_printk_sysctl(const struct ctl_table *table, int write,
tracepoint_printk = 0; tracepoint_printk = 0;
if (save_tracepoint_printk == tracepoint_printk) if (save_tracepoint_printk == tracepoint_printk)
goto out; return ret;
if (tracepoint_printk) if (tracepoint_printk)
static_key_enable(&tracepoint_printk_key.key); static_key_enable(&tracepoint_printk_key.key);
else else
static_key_disable(&tracepoint_printk_key.key); static_key_disable(&tracepoint_printk_key.key);
out:
mutex_unlock(&tracepoint_printk_mutex);
return ret; return ret;
} }
@ -5126,7 +5107,8 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
u32 tracer_flags; u32 tracer_flags;
int i; int i;
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
tracer_flags = tr->current_trace->flags->val; tracer_flags = tr->current_trace->flags->val;
trace_opts = tr->current_trace->flags->opts; trace_opts = tr->current_trace->flags->opts;
@ -5143,7 +5125,6 @@ static int tracing_trace_options_show(struct seq_file *m, void *v)
else else
seq_printf(m, "no%s\n", trace_opts[i].name); seq_printf(m, "no%s\n", trace_opts[i].name);
} }
mutex_unlock(&trace_types_lock);
return 0; return 0;
} }
@ -5808,7 +5789,7 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
return; return;
} }
mutex_lock(&trace_eval_mutex); guard(mutex)(&trace_eval_mutex);
if (!trace_eval_maps) if (!trace_eval_maps)
trace_eval_maps = map_array; trace_eval_maps = map_array;
@ -5832,8 +5813,6 @@ trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
map_array++; map_array++;
} }
memset(map_array, 0, sizeof(*map_array)); memset(map_array, 0, sizeof(*map_array));
mutex_unlock(&trace_eval_mutex);
} }
static void trace_create_eval_file(struct dentry *d_tracer) static void trace_create_eval_file(struct dentry *d_tracer)
@ -5997,23 +5976,18 @@ ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
{ {
int ret; int ret;
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
if (cpu_id != RING_BUFFER_ALL_CPUS) { if (cpu_id != RING_BUFFER_ALL_CPUS) {
/* make sure, this cpu is enabled in the mask */ /* make sure, this cpu is enabled in the mask */
if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) { if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask))
ret = -EINVAL; return -EINVAL;
goto out;
}
} }
ret = __tracing_resize_ring_buffer(tr, size, cpu_id); ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
if (ret < 0) if (ret < 0)
ret = -ENOMEM; ret = -ENOMEM;
out:
mutex_unlock(&trace_types_lock);
return ret; return ret;
} }
@ -6105,9 +6079,9 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
#ifdef CONFIG_TRACER_MAX_TRACE #ifdef CONFIG_TRACER_MAX_TRACE
bool had_max_tr; bool had_max_tr;
#endif #endif
int ret = 0; int ret;
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
update_last_data(tr); update_last_data(tr);
@ -6115,7 +6089,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
ret = __tracing_resize_ring_buffer(tr, trace_buf_size, ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
RING_BUFFER_ALL_CPUS); RING_BUFFER_ALL_CPUS);
if (ret < 0) if (ret < 0)
goto out; return ret;
ret = 0; ret = 0;
} }
@ -6123,43 +6097,37 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (strcmp(t->name, buf) == 0) if (strcmp(t->name, buf) == 0)
break; break;
} }
if (!t) { if (!t)
ret = -EINVAL; return -EINVAL;
goto out;
}
if (t == tr->current_trace) if (t == tr->current_trace)
goto out; return 0;
#ifdef CONFIG_TRACER_SNAPSHOT #ifdef CONFIG_TRACER_SNAPSHOT
if (t->use_max_tr) { if (t->use_max_tr) {
local_irq_disable(); local_irq_disable();
arch_spin_lock(&tr->max_lock); arch_spin_lock(&tr->max_lock);
if (tr->cond_snapshot) ret = tr->cond_snapshot ? -EBUSY : 0;
ret = -EBUSY;
arch_spin_unlock(&tr->max_lock); arch_spin_unlock(&tr->max_lock);
local_irq_enable(); local_irq_enable();
if (ret) if (ret)
goto out; return ret;
} }
#endif #endif
/* Some tracers won't work on kernel command line */ /* Some tracers won't work on kernel command line */
if (system_state < SYSTEM_RUNNING && t->noboot) { if (system_state < SYSTEM_RUNNING && t->noboot) {
pr_warn("Tracer '%s' is not allowed on command line, ignored\n", pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
t->name); t->name);
goto out; return -EINVAL;
} }
/* Some tracers are only allowed for the top level buffer */ /* Some tracers are only allowed for the top level buffer */
if (!trace_ok_for_array(t, tr)) { if (!trace_ok_for_array(t, tr))
ret = -EINVAL; return -EINVAL;
goto out;
}
/* If trace pipe files are being read, we can't change the tracer */ /* If trace pipe files are being read, we can't change the tracer */
if (tr->trace_ref) { if (tr->trace_ref)
ret = -EBUSY; return -EBUSY;
goto out;
}
trace_branch_disable(); trace_branch_disable();
@ -6190,7 +6158,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (!had_max_tr && t->use_max_tr) { if (!had_max_tr && t->use_max_tr) {
ret = tracing_arm_snapshot_locked(tr); ret = tracing_arm_snapshot_locked(tr);
if (ret) if (ret)
goto out; return ret;
} }
#else #else
tr->current_trace = &nop_trace; tr->current_trace = &nop_trace;
@ -6203,17 +6171,15 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (t->use_max_tr) if (t->use_max_tr)
tracing_disarm_snapshot(tr); tracing_disarm_snapshot(tr);
#endif #endif
goto out; return ret;
} }
} }
tr->current_trace = t; tr->current_trace = t;
tr->current_trace->enabled++; tr->current_trace->enabled++;
trace_branch_enable(tr); trace_branch_enable(tr);
out:
mutex_unlock(&trace_types_lock);
return ret; return 0;
} }
static ssize_t static ssize_t
@ -6291,22 +6257,18 @@ tracing_thresh_write(struct file *filp, const char __user *ubuf,
struct trace_array *tr = filp->private_data; struct trace_array *tr = filp->private_data;
int ret; int ret;
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
if (ret < 0) if (ret < 0)
goto out; return ret;
if (tr->current_trace->update_thresh) { if (tr->current_trace->update_thresh) {
ret = tr->current_trace->update_thresh(tr); ret = tr->current_trace->update_thresh(tr);
if (ret < 0) if (ret < 0)
goto out; return ret;
} }
ret = cnt; return cnt;
out:
mutex_unlock(&trace_types_lock);
return ret;
} }
#ifdef CONFIG_TRACER_MAX_TRACE #ifdef CONFIG_TRACER_MAX_TRACE
@ -6525,31 +6487,29 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
* This is just a matter of traces coherency, the ring buffer itself * This is just a matter of traces coherency, the ring buffer itself
* is protected. * is protected.
*/ */
mutex_lock(&iter->mutex); guard(mutex)(&iter->mutex);
/* return any leftover data */ /* return any leftover data */
sret = trace_seq_to_user(&iter->seq, ubuf, cnt); sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
if (sret != -EBUSY) if (sret != -EBUSY)
goto out; return sret;
trace_seq_init(&iter->seq); trace_seq_init(&iter->seq);
if (iter->trace->read) { if (iter->trace->read) {
sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
if (sret) if (sret)
goto out; return sret;
} }
waitagain: waitagain:
sret = tracing_wait_pipe(filp); sret = tracing_wait_pipe(filp);
if (sret <= 0) if (sret <= 0)
goto out; return sret;
/* stop when tracing is finished */ /* stop when tracing is finished */
if (trace_empty(iter)) { if (trace_empty(iter))
sret = 0; return 0;
goto out;
}
if (cnt >= TRACE_SEQ_BUFFER_SIZE) if (cnt >= TRACE_SEQ_BUFFER_SIZE)
cnt = TRACE_SEQ_BUFFER_SIZE - 1; cnt = TRACE_SEQ_BUFFER_SIZE - 1;
@ -6613,9 +6573,6 @@ waitagain:
if (sret == -EBUSY) if (sret == -EBUSY)
goto waitagain; goto waitagain;
out:
mutex_unlock(&iter->mutex);
return sret; return sret;
} }
@ -7207,25 +7164,19 @@ u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_eve
*/ */
int tracing_set_filter_buffering(struct trace_array *tr, bool set) int tracing_set_filter_buffering(struct trace_array *tr, bool set)
{ {
int ret = 0; guard(mutex)(&trace_types_lock);
mutex_lock(&trace_types_lock);
if (set && tr->no_filter_buffering_ref++) if (set && tr->no_filter_buffering_ref++)
goto out; return 0;
if (!set) { if (!set) {
if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) { if (WARN_ON_ONCE(!tr->no_filter_buffering_ref))
ret = -EINVAL; return -EINVAL;
goto out;
}
--tr->no_filter_buffering_ref; --tr->no_filter_buffering_ref;
} }
out:
mutex_unlock(&trace_types_lock);
return ret; return 0;
} }
struct ftrace_buffer_info { struct ftrace_buffer_info {
@ -7301,12 +7252,10 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (ret) if (ret)
return ret; return ret;
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
if (tr->current_trace->use_max_tr) { if (tr->current_trace->use_max_tr)
ret = -EBUSY; return -EBUSY;
goto out;
}
local_irq_disable(); local_irq_disable();
arch_spin_lock(&tr->max_lock); arch_spin_lock(&tr->max_lock);
@ -7315,24 +7264,20 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
arch_spin_unlock(&tr->max_lock); arch_spin_unlock(&tr->max_lock);
local_irq_enable(); local_irq_enable();
if (ret) if (ret)
goto out; return ret;
switch (val) { switch (val) {
case 0: case 0:
if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
ret = -EINVAL; return -EINVAL;
break;
}
if (tr->allocated_snapshot) if (tr->allocated_snapshot)
free_snapshot(tr); free_snapshot(tr);
break; break;
case 1: case 1:
/* Only allow per-cpu swap if the ring buffer supports it */ /* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { if (iter->cpu_file != RING_BUFFER_ALL_CPUS)
ret = -EINVAL; return -EINVAL;
break;
}
#endif #endif
if (tr->allocated_snapshot) if (tr->allocated_snapshot)
ret = resize_buffer_duplicate_size(&tr->max_buffer, ret = resize_buffer_duplicate_size(&tr->max_buffer,
@ -7340,7 +7285,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
ret = tracing_arm_snapshot_locked(tr); ret = tracing_arm_snapshot_locked(tr);
if (ret) if (ret)
break; return ret;
/* Now, we're going to swap */ /* Now, we're going to swap */
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
@ -7367,8 +7312,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
*ppos += cnt; *ppos += cnt;
ret = cnt; ret = cnt;
} }
out:
mutex_unlock(&trace_types_lock);
return ret; return ret;
} }
@ -7754,12 +7698,11 @@ void tracing_log_err(struct trace_array *tr,
len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1; len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
mutex_lock(&tracing_err_log_lock); guard(mutex)(&tracing_err_log_lock);
err = get_tracing_log_err(tr, len); err = get_tracing_log_err(tr, len);
if (PTR_ERR(err) == -ENOMEM) { if (PTR_ERR(err) == -ENOMEM)
mutex_unlock(&tracing_err_log_lock);
return; return;
}
snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd); snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
@ -7770,7 +7713,6 @@ void tracing_log_err(struct trace_array *tr,
err->info.ts = local_clock(); err->info.ts = local_clock();
list_add_tail(&err->list, &tr->err_log); list_add_tail(&err->list, &tr->err_log);
mutex_unlock(&tracing_err_log_lock);
} }
static void clear_tracing_err_log(struct trace_array *tr) static void clear_tracing_err_log(struct trace_array *tr)
@ -9514,20 +9456,17 @@ static int instance_mkdir(const char *name)
struct trace_array *tr; struct trace_array *tr;
int ret; int ret;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
ret = -EEXIST; ret = -EEXIST;
if (trace_array_find(name)) if (trace_array_find(name))
goto out_unlock; return -EEXIST;
tr = trace_array_create(name); tr = trace_array_create(name);
ret = PTR_ERR_OR_ZERO(tr); ret = PTR_ERR_OR_ZERO(tr);
out_unlock:
mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
return ret; return ret;
} }
@ -9577,24 +9516,23 @@ struct trace_array *trace_array_get_by_name(const char *name, const char *system
{ {
struct trace_array *tr; struct trace_array *tr;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) { list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (tr->name && strcmp(tr->name, name) == 0) if (tr->name && strcmp(tr->name, name) == 0) {
goto out_unlock; tr->ref++;
return tr;
}
} }
tr = trace_array_create_systems(name, systems, 0, 0); tr = trace_array_create_systems(name, systems, 0, 0);
if (IS_ERR(tr)) if (IS_ERR(tr))
tr = NULL; tr = NULL;
out_unlock: else
if (tr)
tr->ref++; tr->ref++;
mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
return tr; return tr;
} }
EXPORT_SYMBOL_GPL(trace_array_get_by_name); EXPORT_SYMBOL_GPL(trace_array_get_by_name);
@ -9645,48 +9583,36 @@ static int __remove_instance(struct trace_array *tr)
int trace_array_destroy(struct trace_array *this_tr) int trace_array_destroy(struct trace_array *this_tr)
{ {
struct trace_array *tr; struct trace_array *tr;
int ret;
if (!this_tr) if (!this_tr)
return -EINVAL; return -EINVAL;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
ret = -ENODEV;
/* Making sure trace array exists before destroying it. */ /* Making sure trace array exists before destroying it. */
list_for_each_entry(tr, &ftrace_trace_arrays, list) { list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (tr == this_tr) { if (tr == this_tr)
ret = __remove_instance(tr); return __remove_instance(tr);
break;
}
} }
mutex_unlock(&trace_types_lock); return -ENODEV;
mutex_unlock(&event_mutex);
return ret;
} }
EXPORT_SYMBOL_GPL(trace_array_destroy); EXPORT_SYMBOL_GPL(trace_array_destroy);
static int instance_rmdir(const char *name) static int instance_rmdir(const char *name)
{ {
struct trace_array *tr; struct trace_array *tr;
int ret;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
ret = -ENODEV;
tr = trace_array_find(name); tr = trace_array_find(name);
if (tr) if (!tr)
ret = __remove_instance(tr); return -ENODEV;
mutex_unlock(&trace_types_lock); return __remove_instance(tr);
mutex_unlock(&event_mutex);
return ret;
} }
static __init void create_trace_instances(struct dentry *d_tracer) static __init void create_trace_instances(struct dentry *d_tracer)
@ -9699,19 +9625,16 @@ static __init void create_trace_instances(struct dentry *d_tracer)
if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n")) if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
return; return;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) { list_for_each_entry(tr, &ftrace_trace_arrays, list) {
if (!tr->name) if (!tr->name)
continue; continue;
if (MEM_FAIL(trace_array_create_dir(tr) < 0, if (MEM_FAIL(trace_array_create_dir(tr) < 0,
"Failed to create instance directory\n")) "Failed to create instance directory\n"))
break; return;
} }
mutex_unlock(&trace_types_lock);
mutex_unlock(&event_mutex);
} }
static void static void
@ -9925,7 +9848,7 @@ static void trace_module_remove_evals(struct module *mod)
if (!mod->num_trace_evals) if (!mod->num_trace_evals)
return; return;
mutex_lock(&trace_eval_mutex); guard(mutex)(&trace_eval_mutex);
map = trace_eval_maps; map = trace_eval_maps;
@ -9937,12 +9860,10 @@ static void trace_module_remove_evals(struct module *mod)
map = map->tail.next; map = map->tail.next;
} }
if (!map) if (!map)
goto out; return;
*last = trace_eval_jmp_to_tail(map)->tail.next; *last = trace_eval_jmp_to_tail(map)->tail.next;
kfree(map); kfree(map);
out:
mutex_unlock(&trace_eval_mutex);
} }
#else #else
static inline void trace_module_remove_evals(struct module *mod) { } static inline void trace_module_remove_evals(struct module *mod) { }

View File

@ -74,24 +74,19 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
struct dyn_event *pos, *n; struct dyn_event *pos, *n;
char *system = NULL, *event, *p; char *system = NULL, *event, *p;
int argc, ret = -ENOENT; int argc, ret = -ENOENT;
char **argv; char **argv __free(argv_free) = argv_split(GFP_KERNEL, raw_command, &argc);
argv = argv_split(GFP_KERNEL, raw_command, &argc);
if (!argv) if (!argv)
return -ENOMEM; return -ENOMEM;
if (argv[0][0] == '-') { if (argv[0][0] == '-') {
if (argv[0][1] != ':') { if (argv[0][1] != ':')
ret = -EINVAL; return -EINVAL;
goto out;
}
event = &argv[0][2]; event = &argv[0][2];
} else { } else {
event = strchr(argv[0], ':'); event = strchr(argv[0], ':');
if (!event) { if (!event)
ret = -EINVAL; return -EINVAL;
goto out;
}
event++; event++;
} }
@ -101,10 +96,8 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
event = p + 1; event = p + 1;
*p = '\0'; *p = '\0';
} }
if (!system && event[0] == '\0') { if (!system && event[0] == '\0')
ret = -EINVAL; return -EINVAL;
goto out;
}
mutex_lock(&event_mutex); mutex_lock(&event_mutex);
for_each_dyn_event_safe(pos, n) { for_each_dyn_event_safe(pos, n) {
@ -120,8 +113,6 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
} }
tracing_reset_all_online_cpus(); tracing_reset_all_online_cpus();
mutex_unlock(&event_mutex); mutex_unlock(&event_mutex);
out:
argv_free(argv);
return ret; return ret;
} }
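Here the cleanup class is argv_free() rather than kfree(), showing that __free() composes with any destructor registered through DEFINE_FREE(). The declaration below is a sketch of what such a class looks like; the kernel's actual argv_free class may differ in its NULL handling:

	#include <linux/cleanup.h>
	#include <linux/string_helpers.h>	/* argv_split(), argv_free() */

	/* sketch: bind argv_free() into the __free() machinery */
	DEFINE_FREE(argv_free, char **, if (_T) argv_free(_T))

	static int demo_count_words(const char *cmd)
	{
		int argc;
		char **argv __free(argv_free) =
			argv_split(GFP_KERNEL, cmd, &argc);

		if (!argv)
			return -ENOMEM;

		return argc;	/* argv_free(argv) runs automatically */
	}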

View File

@ -917,10 +917,10 @@ static int __trace_eprobe_create(int argc, const char *argv[])
goto error; goto error;
} }
mutex_lock(&event_mutex); scoped_guard(mutex, &event_mutex) {
event_call = find_and_get_event(sys_name, sys_event); event_call = find_and_get_event(sys_name, sys_event);
ep = alloc_event_probe(group, event, event_call, argc - 2); ep = alloc_event_probe(group, event, event_call, argc - 2);
mutex_unlock(&event_mutex); }
if (IS_ERR(ep)) { if (IS_ERR(ep)) {
ret = PTR_ERR(ep); ret = PTR_ERR(ep);
@ -952,23 +952,21 @@ static int __trace_eprobe_create(int argc, const char *argv[])
if (ret < 0) if (ret < 0)
goto error; goto error;
init_trace_eprobe_call(ep); init_trace_eprobe_call(ep);
mutex_lock(&event_mutex); scoped_guard(mutex, &event_mutex) {
ret = trace_probe_register_event_call(&ep->tp); ret = trace_probe_register_event_call(&ep->tp);
if (ret) { if (ret) {
if (ret == -EEXIST) { if (ret == -EEXIST) {
trace_probe_log_set_index(0); trace_probe_log_set_index(0);
trace_probe_log_err(0, EVENT_EXIST); trace_probe_log_err(0, EVENT_EXIST);
} }
mutex_unlock(&event_mutex);
goto error; goto error;
} }
ret = dyn_event_add(&ep->devent, &ep->tp.event->call); ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
if (ret < 0) { if (ret < 0) {
trace_probe_unregister_event_call(&ep->tp); trace_probe_unregister_event_call(&ep->tp);
mutex_unlock(&event_mutex);
goto error; goto error;
} }
mutex_unlock(&event_mutex); }
return ret; return ret;
parse_error: parse_error:
ret = -EINVAL; ret = -EINVAL;
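Where only part of a function must run under the lock, scoped_guard() bounds the critical section to an explicit block instead of the whole function; that is why __trace_eprobe_create() can keep its shared error unwinding outside the lock. A minimal sketch with an invented lock and counter:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);	/* hypothetical */
	static int demo_count;

	static int demo_add(int n)
	{
		if (n < 0)
			return -EINVAL;		/* checked outside the lock */

		scoped_guard(mutex, &demo_lock) {
			if (demo_count + n > 100)
				return -EBUSY;	/* unlocks, then returns */
			demo_count += n;
		}				/* lock dropped here otherwise */

		return 0;
	}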

View File

@ -1558,21 +1558,20 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
if (ret) if (ret)
return ret; return ret;
guard(mutex)(&event_mutex);
switch (val) { switch (val) {
case 0: case 0:
case 1: case 1:
ret = -ENODEV;
mutex_lock(&event_mutex);
file = event_file_file(filp); file = event_file_file(filp);
if (likely(file)) { if (!file)
return -ENODEV;
ret = tracing_update_buffers(file->tr); ret = tracing_update_buffers(file->tr);
if (ret < 0) { if (ret < 0)
mutex_unlock(&event_mutex);
return ret; return ret;
}
ret = ftrace_event_enable_disable(file, val); ret = ftrace_event_enable_disable(file, val);
} if (ret < 0)
mutex_unlock(&event_mutex); return ret;
break; break;
default: default:
@ -1581,7 +1580,7 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
*ppos += cnt; *ppos += cnt;
return ret ? ret : cnt; return cnt;
} }
static ssize_t static ssize_t
@ -2157,7 +2156,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
if (ret < 0) if (ret < 0)
return ret; return ret;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
if (type == TRACE_PIDS) { if (type == TRACE_PIDS) {
filtered_pids = rcu_dereference_protected(tr->filtered_pids, filtered_pids = rcu_dereference_protected(tr->filtered_pids,
@ -2173,7 +2172,7 @@ event_pid_write(struct file *filp, const char __user *ubuf,
ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
if (ret < 0) if (ret < 0)
goto out; return ret;
if (type == TRACE_PIDS) if (type == TRACE_PIDS)
rcu_assign_pointer(tr->filtered_pids, pid_list); rcu_assign_pointer(tr->filtered_pids, pid_list);
@ -2198,10 +2197,6 @@ event_pid_write(struct file *filp, const char __user *ubuf,
*/ */
on_each_cpu(ignore_task_cpu, tr, 1); on_each_cpu(ignore_task_cpu, tr, 1);
out:
mutex_unlock(&event_mutex);
if (ret > 0)
*ppos += ret; *ppos += ret;
return ret; return ret;
@ -3111,6 +3106,20 @@ static bool event_in_systems(struct trace_event_call *call,
return !*p || isspace(*p) || *p == ','; return !*p || isspace(*p) || *p == ',';
} }
#ifdef CONFIG_HIST_TRIGGERS
/*
* Wake up waiters on the hist_poll_wq from irq_work because the hist trigger
* may happen in any context.
*/
static void hist_poll_event_irq_work(struct irq_work *work)
{
wake_up_all(&hist_poll_wq);
}
DEFINE_IRQ_WORK(hist_poll_work, hist_poll_event_irq_work);
DECLARE_WAIT_QUEUE_HEAD(hist_poll_wq);
#endif
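The irq_work indirection exists because hist triggers can fire in NMI or hard-irq context, where calling wake_up_all() directly is unsafe; queueing an irq_work defers the wakeup to a context that may take the waitqueue lock. The hist_poll_wait()/hist_poll_wakeup() helpers used by the histogram code are presumably thin wrappers along these lines (a sketch, not the verbatim kernel definitions):

	#include <linux/irq_work.h>
	#include <linux/poll.h>
	#include <linux/wait.h>

	extern struct irq_work hist_poll_work;
	extern wait_queue_head_t hist_poll_wq;

	#define hist_poll_wait(file, wait)	\
		poll_wait(file, &hist_poll_wq, wait)

	static inline void hist_poll_wakeup(void)
	{
		/* only queue the irq_work when somebody is actually polling */
		if (wq_has_sleeper(&hist_poll_wq))
			irq_work_queue(&hist_poll_work);
	}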
static struct trace_event_file * static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call, trace_create_new_event(struct trace_event_call *call,
struct trace_array *tr) struct trace_array *tr)
@ -3269,13 +3278,13 @@ int trace_add_event_call(struct trace_event_call *call)
int ret; int ret;
lockdep_assert_held(&event_mutex); lockdep_assert_held(&event_mutex);
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
ret = __register_event(call, NULL); ret = __register_event(call, NULL);
if (ret >= 0) if (ret < 0)
__add_event_to_tracers(call); return ret;
mutex_unlock(&trace_types_lock); __add_event_to_tracers(call);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(trace_add_event_call); EXPORT_SYMBOL_GPL(trace_add_event_call);
@ -3529,30 +3538,21 @@ struct trace_event_file *trace_get_event_file(const char *instance,
return ERR_PTR(ret); return ERR_PTR(ret);
} }
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
file = find_event_file(tr, system, event); file = find_event_file(tr, system, event);
if (!file) { if (!file) {
trace_array_put(tr); trace_array_put(tr);
ret = -EINVAL; return ERR_PTR(-EINVAL);
goto out;
} }
/* Don't let event modules unload while in use */ /* Don't let event modules unload while in use */
ret = trace_event_try_get_ref(file->event_call); ret = trace_event_try_get_ref(file->event_call);
if (!ret) { if (!ret) {
trace_array_put(tr); trace_array_put(tr);
ret = -EBUSY; return ERR_PTR(-EBUSY);
goto out;
} }
ret = 0;
out:
mutex_unlock(&event_mutex);
if (ret)
file = ERR_PTR(ret);
return file; return file;
} }
EXPORT_SYMBOL_GPL(trace_get_event_file); EXPORT_SYMBOL_GPL(trace_get_event_file);
@ -3770,6 +3770,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
struct trace_event_file *file; struct trace_event_file *file;
struct ftrace_probe_ops *ops; struct ftrace_probe_ops *ops;
struct event_probe_data *data; struct event_probe_data *data;
unsigned long count = -1;
const char *system; const char *system;
const char *event; const char *event;
char *number; char *number;
@ -3789,12 +3790,11 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
event = strsep(&param, ":"); event = strsep(&param, ":");
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
ret = -EINVAL;
file = find_event_file(tr, system, event); file = find_event_file(tr, system, event);
if (!file) if (!file)
goto out; return -EINVAL;
enable = strcmp(cmd, ENABLE_EVENT_STR) == 0; enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
@ -3803,74 +3803,62 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
else else
ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops; ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
if (glob[0] == '!') { if (glob[0] == '!')
ret = unregister_ftrace_function_probe_func(glob+1, tr, ops); return unregister_ftrace_function_probe_func(glob+1, tr, ops);
goto out;
}
ret = -ENOMEM;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
goto out;
data->enable = enable;
data->count = -1;
data->file = file;
if (!param)
goto out_reg;
if (param) {
number = strsep(&param, ":"); number = strsep(&param, ":");
ret = -EINVAL;
if (!strlen(number)) if (!strlen(number))
goto out_free; return -EINVAL;
/* /*
* We use the callback data field (which is a pointer) * We use the callback data field (which is a pointer)
* as our counter. * as our counter.
*/ */
ret = kstrtoul(number, 0, &data->count); ret = kstrtoul(number, 0, &count);
if (ret) if (ret)
goto out_free; return ret;
}
out_reg:
/* Don't let event modules unload while probe registered */ /* Don't let event modules unload while probe registered */
ret = trace_event_try_get_ref(file->event_call); ret = trace_event_try_get_ref(file->event_call);
if (!ret) { if (!ret)
ret = -EBUSY; return -EBUSY;
goto out_free;
}
ret = __ftrace_event_enable_disable(file, 1, 1); ret = __ftrace_event_enable_disable(file, 1, 1);
if (ret < 0) if (ret < 0)
goto out_put; goto out_put;
ret = -ENOMEM;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
goto out_put;
data->enable = enable;
data->count = count;
data->file = file;
ret = register_ftrace_function_probe(glob, tr, ops, data); ret = register_ftrace_function_probe(glob, tr, ops, data);
/* /*
* The above returns on success the # of functions enabled, * The above returns on success the # of functions enabled,
* but if it didn't find any functions it returns zero. * but if it didn't find any functions it returns zero.
* Consider no functions a failure too. * Consider no functions a failure too.
*/ */
if (!ret) {
ret = -ENOENT;
goto out_disable;
} else if (ret < 0)
goto out_disable;
/* Just return zero, not the number of enabled functions */
ret = 0;
out:
mutex_unlock(&event_mutex);
return ret;
out_disable: /* Just return zero, not the number of enabled functions */
if (ret > 0)
return 0;
kfree(data);
if (!ret)
ret = -ENOENT;
__ftrace_event_enable_disable(file, 0, 1); __ftrace_event_enable_disable(file, 0, 1);
out_put: out_put:
trace_event_put_ref(file->event_call); trace_event_put_ref(file->event_call);
out_free: return ret;
kfree(data);
goto out;
} }
static struct ftrace_func_command event_enable_cmd = { static struct ftrace_func_command event_enable_cmd = {
@ -4093,20 +4081,17 @@ early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
{ {
int ret; int ret;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
ret = create_event_toplevel_files(parent, tr); ret = create_event_toplevel_files(parent, tr);
if (ret) if (ret)
goto out_unlock; return ret;
down_write(&trace_event_sem); down_write(&trace_event_sem);
__trace_early_add_event_dirs(tr); __trace_early_add_event_dirs(tr);
up_write(&trace_event_sem); up_write(&trace_event_sem);
out_unlock: return 0;
mutex_unlock(&event_mutex);
return ret;
} }
/* Must be called with event_mutex held */ /* Must be called with event_mutex held */

View File

@ -2405,13 +2405,11 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
struct event_filter *filter = NULL; struct event_filter *filter = NULL;
int err = 0; int err = 0;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
/* Make sure the system still has events */ /* Make sure the system still has events */
if (!dir->nr_events) { if (!dir->nr_events)
err = -ENODEV; return -ENODEV;
goto out_unlock;
}
if (!strcmp(strstrip(filter_string), "0")) { if (!strcmp(strstrip(filter_string), "0")) {
filter_free_subsystem_preds(dir, tr); filter_free_subsystem_preds(dir, tr);
@ -2422,7 +2420,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
tracepoint_synchronize_unregister(); tracepoint_synchronize_unregister();
filter_free_subsystem_filters(dir, tr); filter_free_subsystem_filters(dir, tr);
__free_filter(filter); __free_filter(filter);
goto out_unlock; return 0;
} }
err = create_system_filter(dir, filter_string, &filter); err = create_system_filter(dir, filter_string, &filter);
@ -2434,8 +2432,6 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
__free_filter(system->filter); __free_filter(system->filter);
system->filter = filter; system->filter = filter;
} }
out_unlock:
mutex_unlock(&event_mutex);
return err; return err;
} }
@ -2612,17 +2608,15 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
struct event_filter *filter = NULL; struct event_filter *filter = NULL;
struct trace_event_call *call; struct trace_event_call *call;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
call = event->tp_event; call = event->tp_event;
err = -EINVAL;
if (!call) if (!call)
goto out_unlock; return -EINVAL;
err = -EEXIST;
if (event->filter) if (event->filter)
goto out_unlock; return -EEXIST;
err = create_filter(NULL, call, filter_str, false, &filter); err = create_filter(NULL, call, filter_str, false, &filter);
if (err) if (err)
@ -2637,9 +2631,6 @@ free_filter:
if (err || ftrace_event_is_function(call)) if (err || ftrace_event_is_function(call))
__free_filter(filter); __free_filter(filter);
out_unlock:
mutex_unlock(&event_mutex);
return err; return err;
} }

View File

@ -5311,6 +5311,8 @@ static void event_hist_trigger(struct event_trigger_data *data,
if (resolve_var_refs(hist_data, key, var_ref_vals, true)) if (resolve_var_refs(hist_data, key, var_ref_vals, true))
hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals); hist_trigger_actions(hist_data, elt, buffer, rec, rbe, key, var_ref_vals);
hist_poll_wakeup();
} }
static void hist_trigger_stacktrace_print(struct seq_file *m, static void hist_trigger_stacktrace_print(struct seq_file *m,
@ -5590,49 +5592,128 @@ static void hist_trigger_show(struct seq_file *m,
n_entries, (u64)atomic64_read(&hist_data->map->drops)); n_entries, (u64)atomic64_read(&hist_data->map->drops));
} }
struct hist_file_data {
struct file *file;
u64 last_read;
u64 last_act;
};
static u64 get_hist_hit_count(struct trace_event_file *event_file)
{
struct hist_trigger_data *hist_data;
struct event_trigger_data *data;
u64 ret = 0;
list_for_each_entry(data, &event_file->triggers, list) {
if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) {
hist_data = data->private_data;
ret += atomic64_read(&hist_data->map->hits);
}
}
return ret;
}
static int hist_show(struct seq_file *m, void *v) static int hist_show(struct seq_file *m, void *v)
{ {
struct hist_file_data *hist_file = m->private;
struct event_trigger_data *data; struct event_trigger_data *data;
struct trace_event_file *event_file; struct trace_event_file *event_file;
int n = 0, ret = 0; int n = 0;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
event_file = event_file_file(m->private); event_file = event_file_file(hist_file->file);
if (unlikely(!event_file)) { if (unlikely(!event_file))
ret = -ENODEV; return -ENODEV;
goto out_unlock;
}
list_for_each_entry(data, &event_file->triggers, list) { list_for_each_entry(data, &event_file->triggers, list) {
if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
hist_trigger_show(m, data, n++); hist_trigger_show(m, data, n++);
} }
hist_file->last_read = get_hist_hit_count(event_file);
/*
* Update last_act too so that poll()/POLLPRI can wait for the next
* event after any syscall on the hist file.
*/
hist_file->last_act = hist_file->last_read;
out_unlock: return 0;
mutex_unlock(&event_mutex); }
static __poll_t event_hist_poll(struct file *file, struct poll_table_struct *wait)
{
struct trace_event_file *event_file;
struct seq_file *m = file->private_data;
struct hist_file_data *hist_file = m->private;
__poll_t ret = 0;
u64 cnt;
guard(mutex)(&event_mutex);
event_file = event_file_data(file);
if (!event_file)
return EPOLLERR;
hist_poll_wait(file, wait);
cnt = get_hist_hit_count(event_file);
if (hist_file->last_read != cnt)
ret |= EPOLLIN | EPOLLRDNORM;
if (hist_file->last_act != cnt) {
hist_file->last_act = cnt;
ret |= EPOLLPRI;
}
return ret; return ret;
} }
static int event_hist_release(struct inode *inode, struct file *file)
{
struct seq_file *m = file->private_data;
struct hist_file_data *hist_file = m->private;
kfree(hist_file);
return tracing_single_release_file_tr(inode, file);
}
static int event_hist_open(struct inode *inode, struct file *file) static int event_hist_open(struct inode *inode, struct file *file)
{ {
struct trace_event_file *event_file;
struct hist_file_data *hist_file;
int ret; int ret;
ret = tracing_open_file_tr(inode, file); ret = tracing_open_file_tr(inode, file);
if (ret) if (ret)
return ret; return ret;
guard(mutex)(&event_mutex);
event_file = event_file_data(file);
if (!event_file)
return -ENODEV;
hist_file = kzalloc(sizeof(*hist_file), GFP_KERNEL);
if (!hist_file)
return -ENOMEM;
hist_file->file = file;
hist_file->last_act = get_hist_hit_count(event_file);
/* Clear private_data to avoid warning in single_open() */ /* Clear private_data to avoid warning in single_open() */
file->private_data = NULL; file->private_data = NULL;
return single_open(file, hist_show, file); ret = single_open(file, hist_show, hist_file);
if (ret)
kfree(hist_file);
return ret;
} }
const struct file_operations event_hist_fops = { const struct file_operations event_hist_fops = {
.open = event_hist_open, .open = event_hist_open,
.read = seq_read, .read = seq_read,
.llseek = seq_lseek, .llseek = seq_lseek,
.release = tracing_single_release_file_tr, .release = event_hist_release,
.poll = event_hist_poll,
}; };
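With .poll wired up, user space can block on a hist file instead of re-reading it in a loop: EPOLLIN/EPOLLRDNORM signals hits not yet consumed by read(), while EPOLLPRI signals any new hit since the last poll. A hedged user-space sketch (the tracefs path is one example; any event's hist file behaves the same):

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>

	int main(void)
	{
		struct pollfd pfd = {
			.fd = open("/sys/kernel/tracing/events/sched/sched_switch/hist",
				   O_RDONLY),
			.events = POLLPRI,	/* wait for new histogram hits */
		};

		if (pfd.fd < 0)
			return 1;

		while (poll(&pfd, 1, -1) > 0)
			if (pfd.revents & POLLPRI)
				printf("histogram updated\n");
		return 0;
	}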
#ifdef CONFIG_HIST_TRIGGERS_DEBUG #ifdef CONFIG_HIST_TRIGGERS_DEBUG
@ -5873,25 +5954,19 @@ static int hist_debug_show(struct seq_file *m, void *v)
{ {
struct event_trigger_data *data; struct event_trigger_data *data;
struct trace_event_file *event_file; struct trace_event_file *event_file;
int n = 0, ret = 0; int n = 0;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
event_file = event_file_file(m->private); event_file = event_file_file(m->private);
if (unlikely(!event_file)) { if (unlikely(!event_file))
ret = -ENODEV; return -ENODEV;
goto out_unlock;
}
list_for_each_entry(data, &event_file->triggers, list) { list_for_each_entry(data, &event_file->triggers, list) {
if (data->cmd_ops->trigger_type == ETT_EVENT_HIST) if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
hist_trigger_debug_show(m, data, n++); hist_trigger_debug_show(m, data, n++);
} }
return 0;
out_unlock:
mutex_unlock(&event_mutex);
return ret;
} }
static int event_hist_debug_open(struct inode *inode, struct file *file) static int event_hist_debug_open(struct inode *inode, struct file *file)

View File

@ -49,16 +49,11 @@ static char *last_cmd;
static int errpos(const char *str) static int errpos(const char *str)
{ {
int ret = 0; guard(mutex)(&lastcmd_mutex);
mutex_lock(&lastcmd_mutex);
if (!str || !last_cmd) if (!str || !last_cmd)
goto out; return 0;
ret = err_pos(last_cmd, str); return err_pos(last_cmd, str);
out:
mutex_unlock(&lastcmd_mutex);
return ret;
} }
static void last_cmd_set(const char *str) static void last_cmd_set(const char *str)
@ -74,14 +69,12 @@ static void last_cmd_set(const char *str)
static void synth_err(u8 err_type, u16 err_pos) static void synth_err(u8 err_type, u16 err_pos)
{ {
mutex_lock(&lastcmd_mutex); guard(mutex)(&lastcmd_mutex);
if (!last_cmd) if (!last_cmd)
goto out; return;
tracing_log_err(NULL, "synthetic_events", last_cmd, err_text, tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
err_type, err_pos); err_type, err_pos);
out:
mutex_unlock(&lastcmd_mutex);
} }
static int create_synth_event(const char *raw_command); static int create_synth_event(const char *raw_command);

View File

@ -211,12 +211,10 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
if (ret) if (ret)
return ret; return ret;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
if (unlikely(!event_file_file(file))) { if (unlikely(!event_file_file(file)))
mutex_unlock(&event_mutex);
return -ENODEV; return -ENODEV;
}
if ((file->f_mode & FMODE_WRITE) && if ((file->f_mode & FMODE_WRITE) &&
(file->f_flags & O_TRUNC)) { (file->f_flags & O_TRUNC)) {
@ -239,8 +237,6 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
} }
} }
mutex_unlock(&event_mutex);
return ret; return ret;
} }
@ -248,7 +244,6 @@ int trigger_process_regex(struct trace_event_file *file, char *buff)
{ {
char *command, *next; char *command, *next;
struct event_command *p; struct event_command *p;
int ret = -EINVAL;
next = buff = skip_spaces(buff); next = buff = skip_spaces(buff);
command = strsep(&next, ": \t"); command = strsep(&next, ": \t");
@ -259,17 +254,14 @@ int trigger_process_regex(struct trace_event_file *file, char *buff)
} }
command = (command[0] != '!') ? command : command + 1; command = (command[0] != '!') ? command : command + 1;
mutex_lock(&trigger_cmd_mutex); guard(mutex)(&trigger_cmd_mutex);
list_for_each_entry(p, &trigger_commands, list) {
if (strcmp(p->name, command) == 0) {
ret = p->parse(p, file, buff, command, next);
goto out_unlock;
}
}
out_unlock:
mutex_unlock(&trigger_cmd_mutex);
return ret; list_for_each_entry(p, &trigger_commands, list) {
if (strcmp(p->name, command) == 0)
return p->parse(p, file, buff, command, next);
}
return -EINVAL;
} }
static ssize_t event_trigger_regex_write(struct file *file, static ssize_t event_trigger_regex_write(struct file *file,
@ -278,7 +270,7 @@ static ssize_t event_trigger_regex_write(struct file *file,
{ {
struct trace_event_file *event_file; struct trace_event_file *event_file;
ssize_t ret; ssize_t ret;
char *buf; char *buf __free(kfree) = NULL;
if (!cnt) if (!cnt)
return 0; return 0;
@ -292,24 +284,18 @@ static ssize_t event_trigger_regex_write(struct file *file,
strim(buf); strim(buf);
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
event_file = event_file_file(file);
if (unlikely(!event_file)) {
mutex_unlock(&event_mutex);
kfree(buf);
return -ENODEV;
}
ret = trigger_process_regex(event_file, buf);
mutex_unlock(&event_mutex);
kfree(buf); event_file = event_file_file(file);
if (unlikely(!event_file))
return -ENODEV;
ret = trigger_process_regex(event_file, buf);
if (ret < 0) if (ret < 0)
goto out; return ret;
*ppos += cnt; *ppos += cnt;
ret = cnt; return cnt;
out:
return ret;
} }
static int event_trigger_regex_release(struct inode *inode, struct file *file) static int event_trigger_regex_release(struct inode *inode, struct file *file)
@ -359,20 +345,16 @@ const struct file_operations event_trigger_fops = {
__init int register_event_command(struct event_command *cmd) __init int register_event_command(struct event_command *cmd)
{ {
struct event_command *p; struct event_command *p;
int ret = 0;
mutex_lock(&trigger_cmd_mutex); guard(mutex)(&trigger_cmd_mutex);
list_for_each_entry(p, &trigger_commands, list) { list_for_each_entry(p, &trigger_commands, list) {
if (strcmp(cmd->name, p->name) == 0) { if (strcmp(cmd->name, p->name) == 0)
ret = -EBUSY; return -EBUSY;
goto out_unlock;
}
} }
list_add(&cmd->list, &trigger_commands); list_add(&cmd->list, &trigger_commands);
out_unlock:
mutex_unlock(&trigger_cmd_mutex);
return ret; return 0;
} }
/* /*
@ -382,20 +364,17 @@ __init int register_event_command(struct event_command *cmd)
__init int unregister_event_command(struct event_command *cmd) __init int unregister_event_command(struct event_command *cmd)
{ {
struct event_command *p, *n; struct event_command *p, *n;
int ret = -ENODEV;
mutex_lock(&trigger_cmd_mutex); guard(mutex)(&trigger_cmd_mutex);
list_for_each_entry_safe(p, n, &trigger_commands, list) { list_for_each_entry_safe(p, n, &trigger_commands, list) {
if (strcmp(cmd->name, p->name) == 0) { if (strcmp(cmd->name, p->name) == 0) {
ret = 0;
list_del_init(&p->list); list_del_init(&p->list);
goto out_unlock; return 0;
} }
} }
out_unlock:
mutex_unlock(&trigger_cmd_mutex);
return ret; return -ENODEV;
} }
/** /**

View File

@ -181,10 +181,9 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
struct trace_array *tr = gops->private; struct trace_array *tr = gops->private;
struct trace_array_cpu *data; struct trace_array_cpu *data;
struct fgraph_times *ftimes; struct fgraph_times *ftimes;
unsigned long flags;
unsigned int trace_ctx; unsigned int trace_ctx;
long disabled; long disabled;
int ret; int ret = 0;
int cpu; int cpu;
if (*task_var & TRACE_GRAPH_NOTRACE) if (*task_var & TRACE_GRAPH_NOTRACE)
@ -235,25 +234,21 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
if (tracing_thresh) if (tracing_thresh)
return 1; return 1;
local_irq_save(flags); preempt_disable_notrace();
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->array_buffer.data, cpu); data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled); disabled = atomic_read(&data->disabled);
if (likely(disabled == 1)) { if (likely(!disabled)) {
trace_ctx = tracing_gen_ctx_flags(flags); trace_ctx = tracing_gen_ctx();
if (unlikely(IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) && if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR))) { tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
unsigned long retaddr = ftrace_graph_top_ret_addr(current); unsigned long retaddr = ftrace_graph_top_ret_addr(current);
ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr); ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
} else
ret = __trace_graph_entry(tr, trace, trace_ctx);
} else { } else {
ret = 0; ret = __trace_graph_entry(tr, trace, trace_ctx);
} }
}
atomic_dec(&data->disabled); preempt_enable_notrace();
local_irq_restore(flags);
return ret; return ret;
} }
@ -320,7 +315,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
struct trace_array *tr = gops->private; struct trace_array *tr = gops->private;
struct trace_array_cpu *data; struct trace_array_cpu *data;
struct fgraph_times *ftimes; struct fgraph_times *ftimes;
unsigned long flags;
unsigned int trace_ctx; unsigned int trace_ctx;
long disabled; long disabled;
int size; int size;
@ -341,16 +335,15 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
trace->calltime = ftimes->calltime; trace->calltime = ftimes->calltime;
local_irq_save(flags); preempt_disable_notrace();
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->array_buffer.data, cpu); data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled); disabled = atomic_read(&data->disabled);
if (likely(disabled == 1)) { if (likely(!disabled)) {
trace_ctx = tracing_gen_ctx_flags(flags); trace_ctx = tracing_gen_ctx();
__trace_graph_return(tr, trace, trace_ctx); __trace_graph_return(tr, trace, trace_ctx);
} }
atomic_dec(&data->disabled); preempt_enable_notrace();
local_irq_restore(flags);
} }
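Both graph handlers replace the local_irq_save()/atomic_inc_return() pair with preempt_disable_notrace() plus a plain atomic_read(): pinning the CPU keeps the per-cpu data stable, the ring buffer handles irq/NMI reentrancy itself, and the disabled counter is now only consulted rather than used as a recursion lock. The resulting shape, sketched with invented per-cpu data:

	#include <linux/atomic.h>
	#include <linux/percpu.h>
	#include <linux/preempt.h>

	struct demo_cpu_data { atomic_t disabled; };	/* hypothetical */
	static DEFINE_PER_CPU(struct demo_cpu_data, demo_data);

	static void demo_trace_event(void)
	{
		struct demo_cpu_data *data;

		preempt_disable_notrace();	/* pin the CPU without tracing itself */
		data = this_cpu_ptr(&demo_data);
		if (likely(!atomic_read(&data->disabled))) {
			/* record the event; nesting is handled by the ring buffer */
		}
		preempt_enable_notrace();
	}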
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace, static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,

View File

@ -634,7 +634,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
struct trace_kprobe *old_tk; struct trace_kprobe *old_tk;
int ret; int ret;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
old_tk = find_trace_kprobe(trace_probe_name(&tk->tp), old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
trace_probe_group_name(&tk->tp)); trace_probe_group_name(&tk->tp));
@ -642,11 +642,9 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) { if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
trace_probe_log_set_index(0); trace_probe_log_set_index(0);
trace_probe_log_err(0, DIFF_PROBE_TYPE); trace_probe_log_err(0, DIFF_PROBE_TYPE);
ret = -EEXIST; return -EEXIST;
} else {
ret = append_trace_kprobe(tk, old_tk);
} }
goto end; return append_trace_kprobe(tk, old_tk);
} }
/* Register new event */ /* Register new event */
@ -657,7 +655,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
trace_probe_log_err(0, EVENT_EXIST); trace_probe_log_err(0, EVENT_EXIST);
} else } else
pr_warn("Failed to register probe event(%d)\n", ret); pr_warn("Failed to register probe event(%d)\n", ret);
goto end; return ret;
} }
/* Register k*probe */ /* Register k*probe */
@ -672,8 +670,6 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
else else
dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp)); dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
end:
mutex_unlock(&event_mutex);
return ret; return ret;
} }
@ -706,7 +702,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
return NOTIFY_DONE; return NOTIFY_DONE;
/* Update probes on coming module */ /* Update probes on coming module */
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
for_each_trace_kprobe(tk, pos) { for_each_trace_kprobe(tk, pos) {
if (trace_kprobe_within_module(tk, mod)) { if (trace_kprobe_within_module(tk, mod)) {
/* Don't need to check busy - this should have gone. */ /* Don't need to check busy - this should have gone. */
@ -718,7 +714,6 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
module_name(mod), ret); module_name(mod), ret);
} }
} }
mutex_unlock(&event_mutex);
return NOTIFY_DONE; return NOTIFY_DONE;
} }
@ -1970,13 +1965,12 @@ static __init void enable_boot_kprobe_events(void)
struct trace_kprobe *tk; struct trace_kprobe *tk;
struct dyn_event *pos; struct dyn_event *pos;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
for_each_trace_kprobe(tk, pos) { for_each_trace_kprobe(tk, pos) {
list_for_each_entry(file, &tr->events, list) list_for_each_entry(file, &tr->events, list)
if (file->event_call == trace_probe_event_call(&tk->tp)) if (file->event_call == trace_probe_event_call(&tk->tp))
trace_event_enable_disable(file, 1, 0); trace_event_enable_disable(file, 1, 0);
} }
mutex_unlock(&event_mutex);
} }
static __init void setup_boot_kprobe_events(void) static __init void setup_boot_kprobe_events(void)

View File

@ -2083,26 +2083,21 @@ static void osnoise_hotplug_workfn(struct work_struct *dummy)
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
mutex_lock(&trace_types_lock); guard(mutex)(&trace_types_lock);
if (!osnoise_has_registered_instances()) if (!osnoise_has_registered_instances())
goto out_unlock_trace; return;
mutex_lock(&interface_lock); guard(mutex)(&interface_lock);
cpus_read_lock(); guard(cpus_read_lock)();
if (!cpu_online(cpu)) if (!cpu_online(cpu))
goto out_unlock; return;
if (!cpumask_test_cpu(cpu, &osnoise_cpumask)) if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
goto out_unlock; return;
start_kthread(cpu); start_kthread(cpu);
out_unlock:
cpus_read_unlock();
mutex_unlock(&interface_lock);
out_unlock_trace:
mutex_unlock(&trace_types_lock);
} }
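Note that guard() is not limited to mutexes: guard(cpus_read_lock)() above wraps the global CPU-hotplug read lock, whose guard class takes no argument. New classes of that kind are declared with DEFINE_LOCK_GUARD_0(); the sketch below assumes the in-tree cpus_read_lock class is defined in roughly this way and uses a demo name to avoid clashing with it:

	#include <linux/cleanup.h>
	#include <linux/cpu.h>

	/* sketch: an argument-less guard class pairing lock/unlock calls */
	DEFINE_LOCK_GUARD_0(demo_cpus_read_lock, cpus_read_lock(), cpus_read_unlock())

	static bool demo_cpu_is_online(int cpu)
	{
		guard(demo_cpus_read_lock)();	/* cpus_read_unlock() on return */
		return cpu_online(cpu);
	}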
static DECLARE_WORK(osnoise_hotplug_work, osnoise_hotplug_workfn); static DECLARE_WORK(osnoise_hotplug_work, osnoise_hotplug_workfn);
@ -2300,31 +2295,22 @@ static ssize_t
osnoise_cpus_read(struct file *filp, char __user *ubuf, size_t count, osnoise_cpus_read(struct file *filp, char __user *ubuf, size_t count,
loff_t *ppos) loff_t *ppos)
{ {
char *mask_str; char *mask_str __free(kfree) = NULL;
int len; int len;
mutex_lock(&interface_lock); guard(mutex)(&interface_lock);
len = snprintf(NULL, 0, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)) + 1; len = snprintf(NULL, 0, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)) + 1;
mask_str = kmalloc(len, GFP_KERNEL); mask_str = kmalloc(len, GFP_KERNEL);
if (!mask_str) { if (!mask_str)
count = -ENOMEM; return -ENOMEM;
goto out_unlock;
}
len = snprintf(mask_str, len, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)); len = snprintf(mask_str, len, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask));
if (len >= count) { if (len >= count)
count = -EINVAL; return -EINVAL;
goto out_free;
}
count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
out_free:
kfree(mask_str);
out_unlock:
mutex_unlock(&interface_lock);
return count; return count;
} }

View File

@ -520,20 +520,18 @@ stack_trace_sysctl(const struct ctl_table *table, int write, void *buffer,
int was_enabled; int was_enabled;
int ret; int ret;
mutex_lock(&stack_sysctl_mutex); guard(mutex)(&stack_sysctl_mutex);
was_enabled = !!stack_tracer_enabled; was_enabled = !!stack_tracer_enabled;
ret = proc_dointvec(table, write, buffer, lenp, ppos); ret = proc_dointvec(table, write, buffer, lenp, ppos);
if (ret || !write || (was_enabled == !!stack_tracer_enabled)) if (ret || !write || (was_enabled == !!stack_tracer_enabled))
goto out; return ret;
if (stack_tracer_enabled) if (stack_tracer_enabled)
register_ftrace_function(&trace_ops); register_ftrace_function(&trace_ops);
else else
unregister_ftrace_function(&trace_ops); unregister_ftrace_function(&trace_ops);
out:
mutex_unlock(&stack_sysctl_mutex);
return ret; return ret;
} }

View File

@ -128,7 +128,7 @@ static int stat_seq_init(struct stat_session *session)
int ret = 0; int ret = 0;
int i; int i;
mutex_lock(&session->stat_mutex); guard(mutex)(&session->stat_mutex);
__reset_stat_session(session); __reset_stat_session(session);
if (!ts->stat_cmp) if (!ts->stat_cmp)
@ -136,11 +136,11 @@ static int stat_seq_init(struct stat_session *session)
stat = ts->stat_start(ts); stat = ts->stat_start(ts);
if (!stat) if (!stat)
goto exit; return 0;
ret = insert_stat(root, stat, ts->stat_cmp); ret = insert_stat(root, stat, ts->stat_cmp);
if (ret) if (ret)
goto exit; return ret;
/* /*
* Iterate over the tracer stat entries and store them in an rbtree. * Iterate over the tracer stat entries and store them in an rbtree.
@ -157,13 +157,10 @@ static int stat_seq_init(struct stat_session *session)
goto exit_free_rbtree; goto exit_free_rbtree;
} }
exit:
mutex_unlock(&session->stat_mutex);
return ret; return ret;
exit_free_rbtree: exit_free_rbtree:
__reset_stat_session(session); __reset_stat_session(session);
mutex_unlock(&session->stat_mutex);
return ret; return ret;
} }
@ -308,7 +305,7 @@ static int init_stat_file(struct stat_session *session)
int register_stat_tracer(struct tracer_stat *trace) int register_stat_tracer(struct tracer_stat *trace)
{ {
struct stat_session *session, *node; struct stat_session *session, *node;
int ret = -EINVAL; int ret;
if (!trace) if (!trace)
return -EINVAL; return -EINVAL;
@ -316,18 +313,18 @@ int register_stat_tracer(struct tracer_stat *trace)
if (!trace->stat_start || !trace->stat_next || !trace->stat_show) if (!trace->stat_start || !trace->stat_next || !trace->stat_show)
return -EINVAL; return -EINVAL;
guard(mutex)(&all_stat_sessions_mutex);
/* Already registered? */ /* Already registered? */
mutex_lock(&all_stat_sessions_mutex);
list_for_each_entry(node, &all_stat_sessions, session_list) { list_for_each_entry(node, &all_stat_sessions, session_list) {
if (node->ts == trace) if (node->ts == trace)
goto out; return -EINVAL;
} }
ret = -ENOMEM;
/* Init the session */ /* Init the session */
session = kzalloc(sizeof(*session), GFP_KERNEL); session = kzalloc(sizeof(*session), GFP_KERNEL);
if (!session) if (!session)
goto out; return -ENOMEM;
session->ts = trace; session->ts = trace;
INIT_LIST_HEAD(&session->session_list); INIT_LIST_HEAD(&session->session_list);
@ -336,16 +333,13 @@ int register_stat_tracer(struct tracer_stat *trace)
ret = init_stat_file(session); ret = init_stat_file(session);
if (ret) { if (ret) {
destroy_session(session); destroy_session(session);
goto out; return ret;
} }
ret = 0;
/* Register */ /* Register */
list_add_tail(&session->session_list, &all_stat_sessions); list_add_tail(&session->session_list, &all_stat_sessions);
out:
mutex_unlock(&all_stat_sessions_mutex);
return ret; return 0;
} }
void unregister_stat_tracer(struct tracer_stat *trace) void unregister_stat_tracer(struct tracer_stat *trace)


@ -498,11 +498,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
struct trace_uprobe *old_tu; struct trace_uprobe *old_tu;
int ret; int ret;
mutex_lock(&event_mutex); guard(mutex)(&event_mutex);
ret = validate_ref_ctr_offset(tu); ret = validate_ref_ctr_offset(tu);
if (ret) if (ret)
goto end; return ret;
/* register as an event */ /* register as an event */
old_tu = find_probe_event(trace_probe_name(&tu->tp), old_tu = find_probe_event(trace_probe_name(&tu->tp),
@ -511,11 +511,9 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
if (is_ret_probe(tu) != is_ret_probe(old_tu)) { if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
trace_probe_log_set_index(0); trace_probe_log_set_index(0);
trace_probe_log_err(0, DIFF_PROBE_TYPE); trace_probe_log_err(0, DIFF_PROBE_TYPE);
ret = -EEXIST; return -EEXIST;
} else {
ret = append_trace_uprobe(tu, old_tu);
} }
goto end; return append_trace_uprobe(tu, old_tu);
} }
ret = register_uprobe_event(tu); ret = register_uprobe_event(tu);
@ -525,14 +523,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
trace_probe_log_err(0, EVENT_EXIST); trace_probe_log_err(0, EVENT_EXIST);
} else } else
pr_warn("Failed to register probe event(%d)\n", ret); pr_warn("Failed to register probe event(%d)\n", ret);
goto end; return ret;
} }
dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp)); dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
end:
mutex_unlock(&event_mutex);
return ret; return ret;
} }


@ -64,14 +64,141 @@
#define EM_LOONGARCH 258 #define EM_LOONGARCH 258
#endif #endif
typedef union {
Elf32_Ehdr e32;
Elf64_Ehdr e64;
} Elf_Ehdr;
typedef union {
Elf32_Shdr e32;
Elf64_Shdr e64;
} Elf_Shdr;
typedef union {
Elf32_Sym e32;
Elf64_Sym e64;
} Elf_Sym;
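
The unions above let one pointer type cover both ELF widths; once the class is known, per-width accessors are bound through function pointers, just as the r/r2/r8 readers already are for endianness. A self-contained sketch of the dispatch idea, with illustrative names and native endianness assumed:

#include <elf.h>
#include <stdint.h>

typedef union {
	Elf32_Ehdr e32;
	Elf64_Ehdr e64;
} ehdr_u;

/* 32-bit fields are widened to uint64_t so callers never branch on class */
static uint64_t shoff32(ehdr_u *e) { return e->e32.e_shoff; }
static uint64_t shoff64(ehdr_u *e) { return e->e64.e_shoff; }

static uint64_t (*shoff)(ehdr_u *);

static void bind_accessors(ehdr_u *e)
{
	/* e_ident is common to both layouts, so either view may inspect it */
	shoff = (e->e32.e_ident[EI_CLASS] == ELFCLASS64) ? shoff64 : shoff32;
}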
static uint32_t (*r)(const uint32_t *); static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *); static uint16_t (*r2)(const uint16_t *);
static uint64_t (*r8)(const uint64_t *); static uint64_t (*r8)(const uint64_t *);
static void (*w)(uint32_t, uint32_t *); static void (*w)(uint32_t, uint32_t *);
static void (*w2)(uint16_t, uint16_t *);
static void (*w8)(uint64_t, uint64_t *);
typedef void (*table_sort_t)(char *, int); typedef void (*table_sort_t)(char *, int);
static uint64_t ehdr64_shoff(Elf_Ehdr *ehdr)
{
return r8(&ehdr->e64.e_shoff);
}
static uint64_t ehdr32_shoff(Elf_Ehdr *ehdr)
{
return r(&ehdr->e32.e_shoff);
}
#define EHDR_HALF(fn_name) \
static uint16_t ehdr64_##fn_name(Elf_Ehdr *ehdr) \
{ \
return r2(&ehdr->e64.e_##fn_name); \
} \
\
static uint16_t ehdr32_##fn_name(Elf_Ehdr *ehdr) \
{ \
return r2(&ehdr->e32.e_##fn_name); \
}
EHDR_HALF(shentsize)
EHDR_HALF(shstrndx)
EHDR_HALF(shnum)
#define SHDR_WORD(fn_name) \
static uint32_t shdr64_##fn_name(Elf_Shdr *shdr) \
{ \
return r(&shdr->e64.sh_##fn_name); \
} \
\
static uint32_t shdr32_##fn_name(Elf_Shdr *shdr) \
{ \
return r(&shdr->e32.sh_##fn_name); \
}
#define SHDR_ADDR(fn_name) \
static uint64_t shdr64_##fn_name(Elf_Shdr *shdr) \
{ \
return r8(&shdr->e64.sh_##fn_name); \
} \
\
static uint64_t shdr32_##fn_name(Elf_Shdr *shdr) \
{ \
return r(&shdr->e32.sh_##fn_name); \
}
SHDR_ADDR(addr)
SHDR_ADDR(offset)
SHDR_ADDR(size)
SHDR_ADDR(entsize)
SHDR_WORD(link)
SHDR_WORD(name)
SHDR_WORD(type)
#define SYM_ADDR(fn_name) \
static uint64_t sym64_##fn_name(Elf_Sym *sym) \
{ \
return r8(&sym->e64.st_##fn_name); \
} \
\
static uint64_t sym32_##fn_name(Elf_Sym *sym) \
{ \
return r(&sym->e32.st_##fn_name); \
}
#define SYM_WORD(fn_name) \
static uint32_t sym64_##fn_name(Elf_Sym *sym) \
{ \
return r(&sym->e64.st_##fn_name); \
} \
\
static uint32_t sym32_##fn_name(Elf_Sym *sym) \
{ \
return r(&sym->e32.st_##fn_name); \
}
#define SYM_HALF(fn_name) \
static uint16_t sym64_##fn_name(Elf_Sym *sym) \
{ \
return r2(&sym->e64.st_##fn_name); \
} \
\
static uint16_t sym32_##fn_name(Elf_Sym *sym) \
{ \
return r2(&sym->e32.st_##fn_name); \
}
static uint8_t sym64_type(Elf_Sym *sym)
{
return ELF64_ST_TYPE(sym->e64.st_info);
}
static uint8_t sym32_type(Elf_Sym *sym)
{
return ELF32_ST_TYPE(sym->e32.st_info);
}
SYM_ADDR(value)
SYM_WORD(name)
SYM_HALF(shndx)
/* /*
* Get the whole file as a programming convenience in order to avoid * Get the whole file as a programming convenience in order to avoid
* malloc+lseek+read+free of many pieces. If successful, then mmap * malloc+lseek+read+free of many pieces. If successful, then mmap
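
The context comment above (cut off by the hunk) refers to mapping the whole file instead of reading it piecemeal; a sketch of that approach using only standard POSIX calls (map_whole_file() is an illustration, not the tool's actual helper):

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/* illustrative only: map a whole file read/write in one shot */
static void *map_whole_file(const char *fname, size_t *size)
{
	struct stat sb;
	void *addr = MAP_FAILED;
	int fd = open(fname, O_RDWR);

	if (fd < 0)
		return NULL;
	if (fstat(fd, &sb) == 0) {
		*size = sb.st_size;
		addr = mmap(NULL, *size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
	}
	close(fd);	/* the mapping remains valid after close() */
	return addr == MAP_FAILED ? NULL : addr;
}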
@ -146,31 +273,11 @@ static void wbe(uint32_t val, uint32_t *x)
put_unaligned_be32(val, x); put_unaligned_be32(val, x);
} }
static void w2be(uint16_t val, uint16_t *x)
{
put_unaligned_be16(val, x);
}
static void w8be(uint64_t val, uint64_t *x)
{
put_unaligned_be64(val, x);
}
static void wle(uint32_t val, uint32_t *x) static void wle(uint32_t val, uint32_t *x)
{ {
put_unaligned_le32(val, x); put_unaligned_le32(val, x);
} }
static void w2le(uint16_t val, uint16_t *x)
{
put_unaligned_le16(val, x);
}
static void w8le(uint64_t val, uint64_t *x)
{
put_unaligned_le64(val, x);
}
/* /*
* Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
* the way to -256..-1, to avoid conflicting with real section * the way to -256..-1, to avoid conflicting with real section
@ -195,10 +302,443 @@ static inline unsigned int get_secindex(unsigned int shndx,
return r(&symtab_shndx_start[sym_offs]); return r(&symtab_shndx_start[sym_offs]);
} }
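
The remap described in the comment above amounts to reinterpreting the 16-bit section index as signed, sending 0xff00..0xffff to -256..-1 while leaving ordinary indices untouched. A worked toy under that reading (demo_secindex() is illustrative; the real get_secindex() additionally consults the SHT_SYMTAB_SHNDX table for SHN_XINDEX):

#include <elf.h>
#include <stdio.h>

/* reserved indices 0xff00..0xffff become -256..-1 as signed 16-bit */
static int demo_secindex(unsigned int shndx)
{
	if (shndx >= SHN_LORESERVE && shndx <= SHN_HIRESERVE)
		return (short)shndx;
	return shndx;
}

int main(void)
{
	printf("%d\n", demo_secindex(SHN_XINDEX));	/* 0xffff -> -1 */
	printf("%d\n", demo_secindex(42));		/* plain index -> 42 */
	return 0;
}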
/* 32 bit and 64 bit are very similar */ static int compare_extable_32(const void *a, const void *b)
#include "sorttable.h" {
#define SORTTABLE_64 Elf32_Addr av = r(a);
#include "sorttable.h" Elf32_Addr bv = r(b);
if (av < bv)
return -1;
return av > bv;
}
static int compare_extable_64(const void *a, const void *b)
{
Elf64_Addr av = r8(a);
Elf64_Addr bv = r8(b);
if (av < bv)
return -1;
return av > bv;
}
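
Both comparators inspect only the leading address of an entry; it is the element stride handed to qsort() (extable_ent_size, 8 or 16 bytes) that keeps the rest of each entry traveling with its key. A toy run of the same idiom:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* each table entry is an { addr, data } pair; only addr is compared */
struct ent { uint32_t addr, data; };

static int cmp_addr(const void *a, const void *b)
{
	uint32_t av = *(const uint32_t *)a;
	uint32_t bv = *(const uint32_t *)b;

	if (av < bv)
		return -1;
	return av > bv;
}

int main(void)
{
	struct ent tab[] = { { 30, 3 }, { 10, 1 }, { 20, 2 } };
	int i;

	/* the sizeof(tab[0]) stride moves each data word with its key */
	qsort(tab, 3, sizeof(tab[0]), cmp_addr);
	for (i = 0; i < 3; i++)
		printf("%u/%u\n", tab[i].addr, tab[i].data);	/* 10/1 20/2 30/3 */
	return 0;
}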
static inline void *get_index(void *start, int entsize, int index)
{
return start + (entsize * index);
}
static int (*compare_extable)(const void *a, const void *b);
static uint64_t (*ehdr_shoff)(Elf_Ehdr *ehdr);
static uint16_t (*ehdr_shstrndx)(Elf_Ehdr *ehdr);
static uint16_t (*ehdr_shentsize)(Elf_Ehdr *ehdr);
static uint16_t (*ehdr_shnum)(Elf_Ehdr *ehdr);
static uint64_t (*shdr_addr)(Elf_Shdr *shdr);
static uint64_t (*shdr_offset)(Elf_Shdr *shdr);
static uint64_t (*shdr_size)(Elf_Shdr *shdr);
static uint64_t (*shdr_entsize)(Elf_Shdr *shdr);
static uint32_t (*shdr_link)(Elf_Shdr *shdr);
static uint32_t (*shdr_name)(Elf_Shdr *shdr);
static uint32_t (*shdr_type)(Elf_Shdr *shdr);
static uint8_t (*sym_type)(Elf_Sym *sym);
static uint32_t (*sym_name)(Elf_Sym *sym);
static uint64_t (*sym_value)(Elf_Sym *sym);
static uint16_t (*sym_shndx)(Elf_Sym *sym);
static int extable_ent_size;
static int long_size;
#ifdef UNWINDER_ORC_ENABLED
/* ORC unwinder only supports X86_64 */
#include <asm/orc_types.h>
#define ERRSTR_MAXSZ 256
static char g_err[ERRSTR_MAXSZ];
static int *g_orc_ip_table;
static struct orc_entry *g_orc_table;
static pthread_t orc_sort_thread;
static inline unsigned long orc_ip(const int *ip)
{
return (unsigned long)ip + *ip;
}
static int orc_sort_cmp(const void *_a, const void *_b)
{
struct orc_entry *orc_a, *orc_b;
const int *a = g_orc_ip_table + *(int *)_a;
const int *b = g_orc_ip_table + *(int *)_b;
unsigned long a_val = orc_ip(a);
unsigned long b_val = orc_ip(b);
if (a_val > b_val)
return 1;
if (a_val < b_val)
return -1;
/*
* The "weak" section terminator entries need to always be on the left
* to ensure the lookup code skips them in favor of real entries.
* These terminator entries exist to handle any gaps created by
* whitelisted .o files which didn't get objtool generation.
*/
orc_a = g_orc_table + (a - g_orc_ip_table);
orc_b = g_orc_table + (b - g_orc_ip_table);
if (orc_a->type == ORC_TYPE_UNDEFINED && orc_b->type == ORC_TYPE_UNDEFINED)
return 0;
return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}
static void *sort_orctable(void *arg)
{
int i;
int *idxs = NULL;
int *tmp_orc_ip_table = NULL;
struct orc_entry *tmp_orc_table = NULL;
unsigned int *orc_ip_size = (unsigned int *)arg;
unsigned int num_entries = *orc_ip_size / sizeof(int);
unsigned int orc_size = num_entries * sizeof(struct orc_entry);
idxs = (int *)malloc(*orc_ip_size);
if (!idxs) {
snprintf(g_err, ERRSTR_MAXSZ, "malloc idxs: %s",
strerror(errno));
pthread_exit(g_err);
}
tmp_orc_ip_table = (int *)malloc(*orc_ip_size);
if (!tmp_orc_ip_table) {
snprintf(g_err, ERRSTR_MAXSZ, "malloc tmp_orc_ip_table: %s",
strerror(errno));
pthread_exit(g_err);
}
tmp_orc_table = (struct orc_entry *)malloc(orc_size);
if (!tmp_orc_table) {
snprintf(g_err, ERRSTR_MAXSZ, "malloc tmp_orc_table: %s",
strerror(errno));
pthread_exit(g_err);
}
/* initialize indices array, convert ip_table to absolute address */
for (i = 0; i < num_entries; i++) {
idxs[i] = i;
tmp_orc_ip_table[i] = g_orc_ip_table[i] + i * sizeof(int);
}
memcpy(tmp_orc_table, g_orc_table, orc_size);
qsort(idxs, num_entries, sizeof(int), orc_sort_cmp);
for (i = 0; i < num_entries; i++) {
if (idxs[i] == i)
continue;
/* convert back to relative address */
g_orc_ip_table[i] = tmp_orc_ip_table[idxs[i]] - i * sizeof(int);
g_orc_table[i] = tmp_orc_table[idxs[i]];
}
free(idxs);
free(tmp_orc_ip_table);
free(tmp_orc_table);
pthread_exit(NULL);
}
#endif
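
sort_orctable() above sorts an index array rather than the tables themselves because .orc_unwind_ip entries are self-relative: slot i stores the target minus the slot's own address, so an entry moved to a new slot must be re-biased by the slot distance (the i * sizeof(int) terms). A toy demonstration of that invariant, taking the table base as address 0 and 4-byte slots:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* two 4-byte self-relative slots at byte offsets 0 and 4,
	 * pointing at absolute targets 100 and 40: rel = target - slot */
	int32_t rel[2] = { 100 - 0, 40 - 4 };

	/* absolute form used while sorting: abs[i] = rel[i] + 4 * i */
	int32_t abs0 = rel[0] + 0 * 4;	/* 100 */
	int32_t abs1 = rel[1] + 1 * 4;	/*  40 */

	/* swap into ascending order, re-biasing each value for its new slot */
	int32_t sorted[2] = { abs1 - 0 * 4, abs0 - 1 * 4 };

	assert(sorted[0] + 0 * 4 == 40);	/* slot 0 now reaches 40  */
	assert(sorted[1] + 1 * 4 == 100);	/* slot 1 now reaches 100 */
	return 0;
}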
#ifdef MCOUNT_SORT_ENABLED
static pthread_t mcount_sort_thread;
struct elf_mcount_loc {
Elf_Ehdr *ehdr;
Elf_Shdr *init_data_sec;
uint64_t start_mcount_loc;
uint64_t stop_mcount_loc;
};
/* Sort the addresses stored between __start_mcount_loc and __stop_mcount_loc in vmlinux */
static void *sort_mcount_loc(void *arg)
{
struct elf_mcount_loc *emloc = (struct elf_mcount_loc *)arg;
uint64_t offset = emloc->start_mcount_loc - shdr_addr(emloc->init_data_sec)
+ shdr_offset(emloc->init_data_sec);
uint64_t count = emloc->stop_mcount_loc - emloc->start_mcount_loc;
unsigned char *start_loc = (void *)emloc->ehdr + offset;
qsort(start_loc, count/long_size, long_size, compare_extable);
return NULL;
}
/* Get the addresses of __start_mcount_loc and __stop_mcount_loc from the symbol table */
static void get_mcount_loc(struct elf_mcount_loc *emloc, Elf_Shdr *symtab_sec,
const char *strtab)
{
Elf_Sym *sym, *end_sym;
int symentsize = shdr_entsize(symtab_sec);
int found = 0;
sym = (void *)emloc->ehdr + shdr_offset(symtab_sec);
end_sym = (void *)sym + shdr_size(symtab_sec);
while (sym < end_sym) {
if (!strcmp(strtab + sym_name(sym), "__start_mcount_loc")) {
emloc->start_mcount_loc = sym_value(sym);
if (++found == 2)
break;
} else if (!strcmp(strtab + sym_name(sym), "__stop_mcount_loc")) {
emloc->stop_mcount_loc = sym_value(sym);
if (++found == 2)
break;
}
sym = (void *)sym + symentsize;
}
if (!emloc->start_mcount_loc) {
fprintf(stderr, "get start_mcount_loc error!");
return;
}
if (!emloc->stop_mcount_loc) {
fprintf(stderr, "get stop_mcount_loc error!");
return;
}
}
#endif
static int do_sort(Elf_Ehdr *ehdr,
char const *const fname,
table_sort_t custom_sort)
{
int rc = -1;
Elf_Shdr *shdr_start;
Elf_Shdr *strtab_sec = NULL;
Elf_Shdr *symtab_sec = NULL;
Elf_Shdr *extab_sec = NULL;
Elf_Shdr *string_sec;
Elf_Sym *sym;
const Elf_Sym *symtab;
Elf32_Word *symtab_shndx = NULL;
Elf_Sym *sort_needed_sym = NULL;
Elf_Shdr *sort_needed_sec;
uint32_t *sort_needed_loc;
void *sym_start;
void *sym_end;
const char *secstrings;
const char *strtab;
char *extab_image;
int sort_need_index;
int symentsize;
int shentsize;
int idx;
int i;
unsigned int shnum;
unsigned int shstrndx;
#ifdef MCOUNT_SORT_ENABLED
struct elf_mcount_loc mstruct = {0};
#endif
#ifdef UNWINDER_ORC_ENABLED
unsigned int orc_ip_size = 0;
unsigned int orc_size = 0;
unsigned int orc_num_entries = 0;
#endif
shdr_start = (Elf_Shdr *)((char *)ehdr + ehdr_shoff(ehdr));
shentsize = ehdr_shentsize(ehdr);
shstrndx = ehdr_shstrndx(ehdr);
if (shstrndx == SHN_XINDEX)
shstrndx = shdr_link(shdr_start);
string_sec = get_index(shdr_start, shentsize, shstrndx);
secstrings = (const char *)ehdr + shdr_offset(string_sec);
shnum = ehdr_shnum(ehdr);
if (shnum == SHN_UNDEF)
shnum = shdr_size(shdr_start);
for (i = 0; i < shnum; i++) {
Elf_Shdr *shdr = get_index(shdr_start, shentsize, i);
idx = shdr_name(shdr);
if (!strcmp(secstrings + idx, "__ex_table"))
extab_sec = shdr;
if (!strcmp(secstrings + idx, ".symtab"))
symtab_sec = shdr;
if (!strcmp(secstrings + idx, ".strtab"))
strtab_sec = shdr;
if (shdr_type(shdr) == SHT_SYMTAB_SHNDX)
symtab_shndx = (Elf32_Word *)((const char *)ehdr +
shdr_offset(shdr));
#ifdef MCOUNT_SORT_ENABLED
/* locate the .init.data section in vmlinux */
if (!strcmp(secstrings + idx, ".init.data"))
mstruct.init_data_sec = shdr;
#endif
#ifdef UNWINDER_ORC_ENABLED
/* locate the ORC unwind tables */
if (!strcmp(secstrings + idx, ".orc_unwind_ip")) {
orc_ip_size = shdr_size(shdr);
g_orc_ip_table = (int *)((void *)ehdr +
shdr_offset(shdr));
}
if (!strcmp(secstrings + idx, ".orc_unwind")) {
orc_size = shdr_size(shdr);
g_orc_table = (struct orc_entry *)((void *)ehdr +
shdr_offset(shdr));
}
#endif
} /* for loop */
#ifdef UNWINDER_ORC_ENABLED
if (!g_orc_ip_table || !g_orc_table) {
fprintf(stderr,
"incomplete ORC unwind tables in file: %s\n", fname);
goto out;
}
orc_num_entries = orc_ip_size / sizeof(int);
if (orc_ip_size % sizeof(int) != 0 ||
orc_size % sizeof(struct orc_entry) != 0 ||
orc_num_entries != orc_size / sizeof(struct orc_entry)) {
fprintf(stderr,
"inconsistent ORC unwind table entries in file: %s\n",
fname);
goto out;
}
/* create thread to sort ORC unwind tables concurrently */
if (pthread_create(&orc_sort_thread, NULL,
sort_orctable, &orc_ip_size)) {
fprintf(stderr,
"pthread_create orc_sort_thread failed '%s': %s\n",
strerror(errno), fname);
goto out;
}
#endif
if (!extab_sec) {
fprintf(stderr, "no __ex_table in file: %s\n", fname);
goto out;
}
if (!symtab_sec) {
fprintf(stderr, "no .symtab in file: %s\n", fname);
goto out;
}
if (!strtab_sec) {
fprintf(stderr, "no .strtab in file: %s\n", fname);
goto out;
}
extab_image = (void *)ehdr + shdr_offset(extab_sec);
strtab = (const char *)ehdr + shdr_offset(strtab_sec);
symtab = (const Elf_Sym *)((const char *)ehdr + shdr_offset(symtab_sec));
#ifdef MCOUNT_SORT_ENABLED
mstruct.ehdr = ehdr;
get_mcount_loc(&mstruct, symtab_sec, strtab);
if (!mstruct.init_data_sec || !mstruct.start_mcount_loc || !mstruct.stop_mcount_loc) {
fprintf(stderr,
"incomplete mcount's sort in file: %s\n",
fname);
goto out;
}
/* create thread to sort mcount_loc concurrently */
if (pthread_create(&mcount_sort_thread, NULL, &sort_mcount_loc, &mstruct)) {
fprintf(stderr,
"pthread_create mcount_sort_thread failed '%s': %s\n",
strerror(errno), fname);
goto out;
}
#endif
if (custom_sort) {
custom_sort(extab_image, shdr_size(extab_sec));
} else {
int num_entries = shdr_size(extab_sec) / extable_ent_size;
qsort(extab_image, num_entries,
extable_ent_size, compare_extable);
}
/* find the flag main_extable_sort_needed */
sym_start = (void *)ehdr + shdr_offset(symtab_sec);
sym_end = sym_start + shdr_size(symtab_sec);
symentsize = shdr_entsize(symtab_sec);
for (sym = sym_start; (void *)sym + symentsize < sym_end;
sym = (void *)sym + symentsize) {
if (sym_type(sym) != STT_OBJECT)
continue;
if (!strcmp(strtab + sym_name(sym),
"main_extable_sort_needed")) {
sort_needed_sym = sym;
break;
}
}
if (!sort_needed_sym) {
fprintf(stderr,
"no main_extable_sort_needed symbol in file: %s\n",
fname);
goto out;
}
sort_need_index = get_secindex(sym_shndx(sym),
((void *)sort_needed_sym - (void *)symtab) / symentsize,
symtab_shndx);
sort_needed_sec = get_index(shdr_start, shentsize, sort_need_index);
sort_needed_loc = (void *)ehdr +
shdr_offset(sort_needed_sec) +
sym_value(sort_needed_sym) - shdr_addr(sort_needed_sec);
/* extable has been sorted, clear the flag */
w(0, sort_needed_loc);
rc = 0;
out:
#ifdef UNWINDER_ORC_ENABLED
if (orc_sort_thread) {
void *retval = NULL;
/* wait for ORC tables sort done */
rc = pthread_join(orc_sort_thread, &retval);
if (rc) {
fprintf(stderr,
"pthread_join failed '%s': %s\n",
strerror(errno), fname);
} else if (retval) {
rc = -1;
fprintf(stderr,
"failed to sort ORC tables '%s': %s\n",
(char *)retval, fname);
}
}
#endif
#ifdef MCOUNT_SORT_ENABLED
if (mcount_sort_thread) {
void *retval = NULL;
/* wait for mcount sort done */
rc = pthread_join(mcount_sort_thread, &retval);
if (rc) {
fprintf(stderr,
"pthread_join failed '%s': %s\n",
strerror(errno), fname);
} else if (retval) {
rc = -1;
fprintf(stderr,
"failed to sort mcount '%s': %s\n",
(char *)retval, fname);
}
}
#endif
return rc;
}
static int compare_relative_table(const void *a, const void *b) static int compare_relative_table(const void *a, const void *b)
{ {
@ -267,41 +807,36 @@ static void sort_relative_table_with_data(char *extab_image, int image_size)
static int do_file(char const *const fname, void *addr) static int do_file(char const *const fname, void *addr)
{ {
int rc = -1; Elf_Ehdr *ehdr = addr;
Elf32_Ehdr *ehdr = addr;
table_sort_t custom_sort = NULL; table_sort_t custom_sort = NULL;
switch (ehdr->e_ident[EI_DATA]) { switch (ehdr->e32.e_ident[EI_DATA]) {
case ELFDATA2LSB: case ELFDATA2LSB:
r = rle; r = rle;
r2 = r2le; r2 = r2le;
r8 = r8le; r8 = r8le;
w = wle; w = wle;
w2 = w2le;
w8 = w8le;
break; break;
case ELFDATA2MSB: case ELFDATA2MSB:
r = rbe; r = rbe;
r2 = r2be; r2 = r2be;
r8 = r8be; r8 = r8be;
w = wbe; w = wbe;
w2 = w2be;
w8 = w8be;
break; break;
default: default:
fprintf(stderr, "unrecognized ELF data encoding %d: %s\n", fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
ehdr->e_ident[EI_DATA], fname); ehdr->e32.e_ident[EI_DATA], fname);
return -1; return -1;
} }
if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 || if (memcmp(ELFMAG, ehdr->e32.e_ident, SELFMAG) != 0 ||
(r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN) || (r2(&ehdr->e32.e_type) != ET_EXEC && r2(&ehdr->e32.e_type) != ET_DYN) ||
ehdr->e_ident[EI_VERSION] != EV_CURRENT) { ehdr->e32.e_ident[EI_VERSION] != EV_CURRENT) {
fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname); fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
return -1; return -1;
} }
switch (r2(&ehdr->e_machine)) { switch (r2(&ehdr->e32.e_machine)) {
case EM_386: case EM_386:
case EM_AARCH64: case EM_AARCH64:
case EM_LOONGARCH: case EM_LOONGARCH:
@ -324,40 +859,74 @@ static int do_file(char const *const fname, void *addr)
break; break;
default: default:
fprintf(stderr, "unrecognized e_machine %d %s\n", fprintf(stderr, "unrecognized e_machine %d %s\n",
r2(&ehdr->e_machine), fname); r2(&ehdr->e32.e_machine), fname);
return -1; return -1;
} }
switch (ehdr->e_ident[EI_CLASS]) { switch (ehdr->e32.e_ident[EI_CLASS]) {
case ELFCLASS32: case ELFCLASS32:
if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr) || if (r2(&ehdr->e32.e_ehsize) != sizeof(Elf32_Ehdr) ||
r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) { r2(&ehdr->e32.e_shentsize) != sizeof(Elf32_Shdr)) {
fprintf(stderr, fprintf(stderr,
"unrecognized ET_EXEC/ET_DYN file: %s\n", fname); "unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
break; return -1;
} }
rc = do_sort_32(ehdr, fname, custom_sort);
compare_extable = compare_extable_32;
ehdr_shoff = ehdr32_shoff;
ehdr_shentsize = ehdr32_shentsize;
ehdr_shstrndx = ehdr32_shstrndx;
ehdr_shnum = ehdr32_shnum;
shdr_addr = shdr32_addr;
shdr_offset = shdr32_offset;
shdr_link = shdr32_link;
shdr_size = shdr32_size;
shdr_name = shdr32_name;
shdr_type = shdr32_type;
shdr_entsize = shdr32_entsize;
sym_type = sym32_type;
sym_name = sym32_name;
sym_value = sym32_value;
sym_shndx = sym32_shndx;
long_size = 4;
extable_ent_size = 8;
break; break;
case ELFCLASS64: case ELFCLASS64:
{ if (r2(&ehdr->e64.e_ehsize) != sizeof(Elf64_Ehdr) ||
Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr; r2(&ehdr->e64.e_shentsize) != sizeof(Elf64_Shdr)) {
if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr) ||
r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
fprintf(stderr, fprintf(stderr,
"unrecognized ET_EXEC/ET_DYN file: %s\n", "unrecognized ET_EXEC/ET_DYN file: %s\n",
fname); fname);
break; return -1;
}
rc = do_sort_64(ghdr, fname, custom_sort);
} }
compare_extable = compare_extable_64;
ehdr_shoff = ehdr64_shoff;
ehdr_shentsize = ehdr64_shentsize;
ehdr_shstrndx = ehdr64_shstrndx;
ehdr_shnum = ehdr64_shnum;
shdr_addr = shdr64_addr;
shdr_offset = shdr64_offset;
shdr_link = shdr64_link;
shdr_size = shdr64_size;
shdr_name = shdr64_name;
shdr_type = shdr64_type;
shdr_entsize = shdr64_entsize;
sym_type = sym64_type;
sym_name = sym64_name;
sym_value = sym64_value;
sym_shndx = sym64_shndx;
long_size = 8;
extable_ent_size = 16;
break; break;
default: default:
fprintf(stderr, "unrecognized ELF class %d %s\n", fprintf(stderr, "unrecognized ELF class %d %s\n",
ehdr->e_ident[EI_CLASS], fname); ehdr->e32.e_ident[EI_CLASS], fname);
break; return -1;
} }
return rc; return do_sort(ehdr, fname, custom_sort);
} }
int main(int argc, char *argv[]) int main(int argc, char *argv[])


@ -1,500 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* sorttable.h
*
* Added ORC unwind tables sort support and other updates:
* Copyright (C) 1999-2019 Alibaba Group Holding Limited. by:
* Shile Zhang <shile.zhang@linux.alibaba.com>
*
* Copyright 2011 - 2012 Cavium, Inc.
*
* Some of code was taken out of arch/x86/kernel/unwind_orc.c, written by:
* Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
*
* Some of this code was taken out of recordmcount.h written by:
*
* Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
* Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
*/
#undef extable_ent_size
#undef compare_extable
#undef get_mcount_loc
#undef sort_mcount_loc
#undef elf_mcount_loc
#undef do_sort
#undef Elf_Addr
#undef Elf_Ehdr
#undef Elf_Shdr
#undef Elf_Rel
#undef Elf_Rela
#undef Elf_Sym
#undef ELF_R_SYM
#undef Elf_r_sym
#undef ELF_R_INFO
#undef Elf_r_info
#undef ELF_ST_BIND
#undef ELF_ST_TYPE
#undef fn_ELF_R_SYM
#undef fn_ELF_R_INFO
#undef uint_t
#undef _r
#undef _w
#ifdef SORTTABLE_64
# define extable_ent_size 16
# define compare_extable compare_extable_64
# define get_mcount_loc get_mcount_loc_64
# define sort_mcount_loc sort_mcount_loc_64
# define elf_mcount_loc elf_mcount_loc_64
# define do_sort do_sort_64
# define Elf_Addr Elf64_Addr
# define Elf_Ehdr Elf64_Ehdr
# define Elf_Shdr Elf64_Shdr
# define Elf_Rel Elf64_Rel
# define Elf_Rela Elf64_Rela
# define Elf_Sym Elf64_Sym
# define ELF_R_SYM ELF64_R_SYM
# define Elf_r_sym Elf64_r_sym
# define ELF_R_INFO ELF64_R_INFO
# define Elf_r_info Elf64_r_info
# define ELF_ST_BIND ELF64_ST_BIND
# define ELF_ST_TYPE ELF64_ST_TYPE
# define fn_ELF_R_SYM fn_ELF64_R_SYM
# define fn_ELF_R_INFO fn_ELF64_R_INFO
# define uint_t uint64_t
# define _r r8
# define _w w8
#else
# define extable_ent_size 8
# define compare_extable compare_extable_32
# define get_mcount_loc get_mcount_loc_32
# define sort_mcount_loc sort_mcount_loc_32
# define elf_mcount_loc elf_mcount_loc_32
# define do_sort do_sort_32
# define Elf_Addr Elf32_Addr
# define Elf_Ehdr Elf32_Ehdr
# define Elf_Shdr Elf32_Shdr
# define Elf_Rel Elf32_Rel
# define Elf_Rela Elf32_Rela
# define Elf_Sym Elf32_Sym
# define ELF_R_SYM ELF32_R_SYM
# define Elf_r_sym Elf32_r_sym
# define ELF_R_INFO ELF32_R_INFO
# define Elf_r_info Elf32_r_info
# define ELF_ST_BIND ELF32_ST_BIND
# define ELF_ST_TYPE ELF32_ST_TYPE
# define fn_ELF_R_SYM fn_ELF32_R_SYM
# define fn_ELF_R_INFO fn_ELF32_R_INFO
# define uint_t uint32_t
# define _r r
# define _w w
#endif
#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
/* ORC unwinder only support X86_64 */
#include <asm/orc_types.h>
#define ERRSTR_MAXSZ 256
char g_err[ERRSTR_MAXSZ];
int *g_orc_ip_table;
struct orc_entry *g_orc_table;
pthread_t orc_sort_thread;
static inline unsigned long orc_ip(const int *ip)
{
return (unsigned long)ip + *ip;
}
static int orc_sort_cmp(const void *_a, const void *_b)
{
struct orc_entry *orc_a, *orc_b;
const int *a = g_orc_ip_table + *(int *)_a;
const int *b = g_orc_ip_table + *(int *)_b;
unsigned long a_val = orc_ip(a);
unsigned long b_val = orc_ip(b);
if (a_val > b_val)
return 1;
if (a_val < b_val)
return -1;
/*
* The "weak" section terminator entries need to always be on the left
* to ensure the lookup code skips them in favor of real entries.
* These terminator entries exist to handle any gaps created by
* whitelisted .o files which didn't get objtool generation.
*/
orc_a = g_orc_table + (a - g_orc_ip_table);
orc_b = g_orc_table + (b - g_orc_ip_table);
if (orc_a->type == ORC_TYPE_UNDEFINED && orc_b->type == ORC_TYPE_UNDEFINED)
return 0;
return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}
static void *sort_orctable(void *arg)
{
int i;
int *idxs = NULL;
int *tmp_orc_ip_table = NULL;
struct orc_entry *tmp_orc_table = NULL;
unsigned int *orc_ip_size = (unsigned int *)arg;
unsigned int num_entries = *orc_ip_size / sizeof(int);
unsigned int orc_size = num_entries * sizeof(struct orc_entry);
idxs = (int *)malloc(*orc_ip_size);
if (!idxs) {
snprintf(g_err, ERRSTR_MAXSZ, "malloc idxs: %s",
strerror(errno));
pthread_exit(g_err);
}
tmp_orc_ip_table = (int *)malloc(*orc_ip_size);
if (!tmp_orc_ip_table) {
snprintf(g_err, ERRSTR_MAXSZ, "malloc tmp_orc_ip_table: %s",
strerror(errno));
pthread_exit(g_err);
}
tmp_orc_table = (struct orc_entry *)malloc(orc_size);
if (!tmp_orc_table) {
snprintf(g_err, ERRSTR_MAXSZ, "malloc tmp_orc_table: %s",
strerror(errno));
pthread_exit(g_err);
}
/* initialize indices array, convert ip_table to absolute address */
for (i = 0; i < num_entries; i++) {
idxs[i] = i;
tmp_orc_ip_table[i] = g_orc_ip_table[i] + i * sizeof(int);
}
memcpy(tmp_orc_table, g_orc_table, orc_size);
qsort(idxs, num_entries, sizeof(int), orc_sort_cmp);
for (i = 0; i < num_entries; i++) {
if (idxs[i] == i)
continue;
/* convert back to relative address */
g_orc_ip_table[i] = tmp_orc_ip_table[idxs[i]] - i * sizeof(int);
g_orc_table[i] = tmp_orc_table[idxs[i]];
}
free(idxs);
free(tmp_orc_ip_table);
free(tmp_orc_table);
pthread_exit(NULL);
}
#endif
static int compare_extable(const void *a, const void *b)
{
Elf_Addr av = _r(a);
Elf_Addr bv = _r(b);
if (av < bv)
return -1;
if (av > bv)
return 1;
return 0;
}
#ifdef MCOUNT_SORT_ENABLED
pthread_t mcount_sort_thread;
struct elf_mcount_loc {
Elf_Ehdr *ehdr;
Elf_Shdr *init_data_sec;
uint_t start_mcount_loc;
uint_t stop_mcount_loc;
};
/* Sort the addresses stored between __start_mcount_loc to __stop_mcount_loc in vmlinux */
static void *sort_mcount_loc(void *arg)
{
struct elf_mcount_loc *emloc = (struct elf_mcount_loc *)arg;
uint_t offset = emloc->start_mcount_loc - _r(&(emloc->init_data_sec)->sh_addr)
+ _r(&(emloc->init_data_sec)->sh_offset);
uint_t count = emloc->stop_mcount_loc - emloc->start_mcount_loc;
unsigned char *start_loc = (void *)emloc->ehdr + offset;
qsort(start_loc, count/sizeof(uint_t), sizeof(uint_t), compare_extable);
return NULL;
}
/* Get the address of __start_mcount_loc and __stop_mcount_loc in System.map */
static void get_mcount_loc(uint_t *_start, uint_t *_stop)
{
FILE *file_start, *file_stop;
char start_buff[20];
char stop_buff[20];
int len = 0;
file_start = popen(" grep start_mcount System.map | awk '{print $1}' ", "r");
if (!file_start) {
fprintf(stderr, "get start_mcount_loc error!");
return;
}
file_stop = popen(" grep stop_mcount System.map | awk '{print $1}' ", "r");
if (!file_stop) {
fprintf(stderr, "get stop_mcount_loc error!");
pclose(file_start);
return;
}
while (fgets(start_buff, sizeof(start_buff), file_start) != NULL) {
len = strlen(start_buff);
start_buff[len - 1] = '\0';
}
*_start = strtoul(start_buff, NULL, 16);
while (fgets(stop_buff, sizeof(stop_buff), file_stop) != NULL) {
len = strlen(stop_buff);
stop_buff[len - 1] = '\0';
}
*_stop = strtoul(stop_buff, NULL, 16);
pclose(file_start);
pclose(file_stop);
}
#endif
static int do_sort(Elf_Ehdr *ehdr,
char const *const fname,
table_sort_t custom_sort)
{
int rc = -1;
Elf_Shdr *s, *shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
Elf_Shdr *strtab_sec = NULL;
Elf_Shdr *symtab_sec = NULL;
Elf_Shdr *extab_sec = NULL;
Elf_Sym *sym;
const Elf_Sym *symtab;
Elf32_Word *symtab_shndx = NULL;
Elf_Sym *sort_needed_sym = NULL;
Elf_Shdr *sort_needed_sec;
Elf_Rel *relocs = NULL;
int relocs_size = 0;
uint32_t *sort_needed_loc;
const char *secstrings;
const char *strtab;
char *extab_image;
int extab_index = 0;
int i;
int idx;
unsigned int shnum;
unsigned int shstrndx;
#ifdef MCOUNT_SORT_ENABLED
struct elf_mcount_loc mstruct = {0};
uint_t _start_mcount_loc = 0;
uint_t _stop_mcount_loc = 0;
#endif
#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
unsigned int orc_ip_size = 0;
unsigned int orc_size = 0;
unsigned int orc_num_entries = 0;
#endif
shstrndx = r2(&ehdr->e_shstrndx);
if (shstrndx == SHN_XINDEX)
shstrndx = r(&shdr[0].sh_link);
secstrings = (const char *)ehdr + _r(&shdr[shstrndx].sh_offset);
shnum = r2(&ehdr->e_shnum);
if (shnum == SHN_UNDEF)
shnum = _r(&shdr[0].sh_size);
for (i = 0, s = shdr; s < shdr + shnum; i++, s++) {
idx = r(&s->sh_name);
if (!strcmp(secstrings + idx, "__ex_table")) {
extab_sec = s;
extab_index = i;
}
if (!strcmp(secstrings + idx, ".symtab"))
symtab_sec = s;
if (!strcmp(secstrings + idx, ".strtab"))
strtab_sec = s;
if ((r(&s->sh_type) == SHT_REL ||
r(&s->sh_type) == SHT_RELA) &&
r(&s->sh_info) == extab_index) {
relocs = (void *)ehdr + _r(&s->sh_offset);
relocs_size = _r(&s->sh_size);
}
if (r(&s->sh_type) == SHT_SYMTAB_SHNDX)
symtab_shndx = (Elf32_Word *)((const char *)ehdr +
_r(&s->sh_offset));
#ifdef MCOUNT_SORT_ENABLED
/* locate the .init.data section in vmlinux */
if (!strcmp(secstrings + idx, ".init.data")) {
get_mcount_loc(&_start_mcount_loc, &_stop_mcount_loc);
mstruct.ehdr = ehdr;
mstruct.init_data_sec = s;
mstruct.start_mcount_loc = _start_mcount_loc;
mstruct.stop_mcount_loc = _stop_mcount_loc;
}
#endif
#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
/* locate the ORC unwind tables */
if (!strcmp(secstrings + idx, ".orc_unwind_ip")) {
orc_ip_size = s->sh_size;
g_orc_ip_table = (int *)((void *)ehdr +
s->sh_offset);
}
if (!strcmp(secstrings + idx, ".orc_unwind")) {
orc_size = s->sh_size;
g_orc_table = (struct orc_entry *)((void *)ehdr +
s->sh_offset);
}
#endif
} /* for loop */
#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
if (!g_orc_ip_table || !g_orc_table) {
fprintf(stderr,
"incomplete ORC unwind tables in file: %s\n", fname);
goto out;
}
orc_num_entries = orc_ip_size / sizeof(int);
if (orc_ip_size % sizeof(int) != 0 ||
orc_size % sizeof(struct orc_entry) != 0 ||
orc_num_entries != orc_size / sizeof(struct orc_entry)) {
fprintf(stderr,
"inconsistent ORC unwind table entries in file: %s\n",
fname);
goto out;
}
/* create thread to sort ORC unwind tables concurrently */
if (pthread_create(&orc_sort_thread, NULL,
sort_orctable, &orc_ip_size)) {
fprintf(stderr,
"pthread_create orc_sort_thread failed '%s': %s\n",
strerror(errno), fname);
goto out;
}
#endif
#ifdef MCOUNT_SORT_ENABLED
if (!mstruct.init_data_sec || !_start_mcount_loc || !_stop_mcount_loc) {
fprintf(stderr,
"incomplete mcount's sort in file: %s\n",
fname);
goto out;
}
/* create thread to sort mcount_loc concurrently */
if (pthread_create(&mcount_sort_thread, NULL, &sort_mcount_loc, &mstruct)) {
fprintf(stderr,
"pthread_create mcount_sort_thread failed '%s': %s\n",
strerror(errno), fname);
goto out;
}
#endif
if (!extab_sec) {
fprintf(stderr, "no __ex_table in file: %s\n", fname);
goto out;
}
if (!symtab_sec) {
fprintf(stderr, "no .symtab in file: %s\n", fname);
goto out;
}
if (!strtab_sec) {
fprintf(stderr, "no .strtab in file: %s\n", fname);
goto out;
}
extab_image = (void *)ehdr + _r(&extab_sec->sh_offset);
strtab = (const char *)ehdr + _r(&strtab_sec->sh_offset);
symtab = (const Elf_Sym *)((const char *)ehdr +
_r(&symtab_sec->sh_offset));
if (custom_sort) {
custom_sort(extab_image, _r(&extab_sec->sh_size));
} else {
int num_entries = _r(&extab_sec->sh_size) / extable_ent_size;
qsort(extab_image, num_entries,
extable_ent_size, compare_extable);
}
/* If there were relocations, we no longer need them. */
if (relocs)
memset(relocs, 0, relocs_size);
/* find the flag main_extable_sort_needed */
for (sym = (void *)ehdr + _r(&symtab_sec->sh_offset);
sym < sym + _r(&symtab_sec->sh_size) / sizeof(Elf_Sym);
sym++) {
if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT)
continue;
if (!strcmp(strtab + r(&sym->st_name),
"main_extable_sort_needed")) {
sort_needed_sym = sym;
break;
}
}
if (!sort_needed_sym) {
fprintf(stderr,
"no main_extable_sort_needed symbol in file: %s\n",
fname);
goto out;
}
sort_needed_sec = &shdr[get_secindex(r2(&sym->st_shndx),
sort_needed_sym - symtab,
symtab_shndx)];
sort_needed_loc = (void *)ehdr +
_r(&sort_needed_sec->sh_offset) +
_r(&sort_needed_sym->st_value) -
_r(&sort_needed_sec->sh_addr);
/* extable has been sorted, clear the flag */
w(0, sort_needed_loc);
rc = 0;
out:
#if defined(SORTTABLE_64) && defined(UNWINDER_ORC_ENABLED)
if (orc_sort_thread) {
void *retval = NULL;
/* wait for ORC tables sort done */
rc = pthread_join(orc_sort_thread, &retval);
if (rc) {
fprintf(stderr,
"pthread_join failed '%s': %s\n",
strerror(errno), fname);
} else if (retval) {
rc = -1;
fprintf(stderr,
"failed to sort ORC tables '%s': %s\n",
(char *)retval, fname);
}
}
#endif
#ifdef MCOUNT_SORT_ENABLED
if (mcount_sort_thread) {
void *retval = NULL;
/* wait for mcount sort done */
rc = pthread_join(mcount_sort_thread, &retval);
if (rc) {
fprintf(stderr,
"pthread_join failed '%s': %s\n",
strerror(errno), fname);
} else if (retval) {
rc = -1;
fprintf(stderr,
"failed to sort mcount '%s': %s\n",
(char *)retval, fname);
}
}
#endif
return rc;
}


@ -6,4 +6,6 @@ TEST_PROGS := ftracetest-ktap
TEST_FILES := test.d settings TEST_FILES := test.d settings
EXTRA_CLEAN := $(OUTPUT)/logs/* EXTRA_CLEAN := $(OUTPUT)/logs/*
TEST_GEN_PROGS = poll
include ../lib.mk include ../lib.mk


@ -0,0 +1,74 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Simple poll on a file.
*
* Copyright (c) 2024 Google LLC.
*/
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#define BUFSIZE 4096
/*
* Usage:
* poll [-I|-P] [-t timeout] FILE
*/
int main(int argc, char *argv[])
{
struct pollfd pfd = {.events = POLLIN};
char buf[BUFSIZE];
int timeout = -1;
int ret, opt;
while ((opt = getopt(argc, argv, "IPt:")) != -1) {
switch (opt) {
case 'I':
pfd.events = POLLIN;
break;
case 'P':
pfd.events = POLLPRI;
break;
case 't':
timeout = atoi(optarg);
break;
default:
fprintf(stderr, "Usage: %s [-I|-P] [-t timeout] FILE\n",
argv[0]);
return -1;
}
}
if (optind >= argc) {
fprintf(stderr, "Error: Polling file is not specified\n");
return -1;
}
pfd.fd = open(argv[optind], O_RDONLY);
if (pfd.fd < 0) {
fprintf(stderr, "failed to open %s", argv[optind]);
perror("open");
return -1;
}
/* Reset poll by read if POLLIN is specified. */
if (pfd.events & POLLIN)
do {} while (read(pfd.fd, buf, BUFSIZE) == BUFSIZE);
ret = poll(&pfd, 1, timeout);
if (ret < 0 && errno != EINTR) {
perror("poll");
return -1;
}
close(pfd.fd);
/* If a timeout happened (ret == 0), the exit code is 1 */
if (ret == 0)
return 1;
return 0;
}


@ -0,0 +1,74 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test poll wait on histogram
# requires: set_event events/sched/sched_process_free/trigger events/sched/sched_process_free/hist
# flags: instance
POLL=${FTRACETEST_ROOT}/poll
if [ ! -x ${POLL} ]; then
echo "poll program is not compiled!"
exit_unresolved
fi
EVENT=events/sched/sched_process_free/
# Check whether poll ops are supported. Before poll was implemented for the
# hist file, it returned immediately with POLLIN | POLLOUT, but never POLLPRI.
# This check must wait >1 sec and then return 1 (timeout).
set +e
${POLL} -I -t 1000 ${EVENT}/hist
ret=$?
set -e
if [ ${ret} != 1 ]; then
echo "poll on hist file is not supported"
exit_unsupported
fi
# Test POLLIN
echo > trace
echo 'hist:key=comm if comm =="sleep"' > ${EVENT}/trigger
echo 1 > ${EVENT}/enable
# This sleep command will exit after 2 seconds.
sleep 2 &
BGPID=$!
# if timeout happens, poll returns 1.
${POLL} -I -t 4000 ${EVENT}/hist
echo 0 > tracing_on
if [ -d /proc/${BGPID} ]; then
echo "poll exits too soon"
kill -KILL ${BGPID} ||:
exit_fail
fi
if ! grep -qw "sleep" trace; then
echo "poll exits before event happens"
exit_fail
fi
# Test POLLPRI
echo > trace
echo 1 > tracing_on
# This sleep command will exit after 2 seconds.
sleep 2 &
BGPID=$!
# if timeout happens, poll returns 1.
${POLL} -P -t 4000 ${EVENT}/hist
echo 0 > tracing_on
if [ -d /proc/${BGPID} ]; then
echo "poll exits too soon"
kill -KILL ${BGPID} ||:
exit_fail
fi
if ! grep -qw "sleep" trace; then
echo "poll exits before event happens"
exit_fail
fi
exit_pass


@ -19,13 +19,14 @@ class Automata:
invalid_state_str = "INVALID_STATE" invalid_state_str = "INVALID_STATE"
def __init__(self, file_path): def __init__(self, file_path, model_name=None):
self.__dot_path = file_path self.__dot_path = file_path
self.name = self.__get_model_name() self.name = model_name or self.__get_model_name()
self.__dot_lines = self.__open_dot() self.__dot_lines = self.__open_dot()
self.states, self.initial_state, self.final_states = self.__get_state_variables() self.states, self.initial_state, self.final_states = self.__get_state_variables()
self.events = self.__get_event_variables() self.events = self.__get_event_variables()
self.function = self.__create_matrix() self.function = self.__create_matrix()
self.events_start, self.events_start_run = self.__store_init_events()
def __get_model_name(self): def __get_model_name(self):
basename = ntpath.basename(self.__dot_path) basename = ntpath.basename(self.__dot_path)
@ -172,3 +173,34 @@ class Automata:
cursor += 1 cursor += 1
return matrix return matrix
def __store_init_events(self):
events_start = [False] * len(self.events)
events_start_run = [False] * len(self.events)
for i, _ in enumerate(self.events):
curr_event_will_init = 0
curr_event_from_init = False
curr_event_used = 0
for j, _ in enumerate(self.states):
if self.function[j][i] != self.invalid_state_str:
curr_event_used += 1
if self.function[j][i] == self.initial_state:
curr_event_will_init += 1
if self.function[0][i] != self.invalid_state_str:
curr_event_from_init = True
# this event always leads to init
if curr_event_will_init and curr_event_used == curr_event_will_init:
events_start[i] = True
# this event is only called from init
if curr_event_from_init and curr_event_used == 1:
events_start_run[i] = True
return events_start, events_start_run
def is_start_event(self, event):
return self.events_start[self.events.index(event)]
def is_start_run_event(self, event):
# prefer handle_start_event if there is one
if any(self.events_start):
return False
return self.events_start_run[self.events.index(event)]


@ -22,8 +22,8 @@ class Dot2c(Automata):
struct_automaton_def = "automaton" struct_automaton_def = "automaton"
var_automaton_def = "aut" var_automaton_def = "aut"
def __init__(self, file_path): def __init__(self, file_path, model_name=None):
super().__init__(file_path) super().__init__(file_path, model_name)
self.line_length = 100 self.line_length = 100
def __buff_to_string(self, buff): def __buff_to_string(self, buff):


@ -21,25 +21,24 @@ if __name__ == '__main__':
parser.add_argument('-t', "--monitor_type", dest="monitor_type", required=True) parser.add_argument('-t', "--monitor_type", dest="monitor_type", required=True)
parser.add_argument('-n', "--model_name", dest="model_name", required=False) parser.add_argument('-n', "--model_name", dest="model_name", required=False)
parser.add_argument("-D", "--description", dest="description", required=False) parser.add_argument("-D", "--description", dest="description", required=False)
parser.add_argument("-a", "--auto_patch", dest="auto_patch",
action="store_true", required=False,
help="Patch the kernel in place")
params = parser.parse_args() params = parser.parse_args()
print("Opening and parsing the dot file %s" % params.dot_file) print("Opening and parsing the dot file %s" % params.dot_file)
try: try:
monitor=dot2k(params.dot_file, params.monitor_type) monitor=dot2k(params.dot_file, params.monitor_type, vars(params))
except Exception as e: except Exception as e:
print('Error: '+ str(e)) print('Error: '+ str(e))
print("Sorry : :-(") print("Sorry : :-(")
sys.exit(1) sys.exit(1)
# easier than using argparse action.
if params.model_name is not None:
print(params.model_name)
print("Writing the monitor into the directory %s" % monitor.name) print("Writing the monitor into the directory %s" % monitor.name)
monitor.print_files() monitor.print_files()
print("Almost done, checklist") print("Almost done, checklist")
print(" - Edit the %s/%s.c to add the instrumentation" % (monitor.name, monitor.name)) print(" - Edit the %s/%s.c to add the instrumentation" % (monitor.name, monitor.name))
print(" - Edit include/trace/events/rv.h to add the tracepoint entry") print(monitor.fill_tracepoint_tooltip())
print(" - Move it to the kernel's monitor directory") print(monitor.fill_makefile_tooltip())
print(" - Edit kernel/trace/rv/Makefile") print(monitor.fill_kconfig_tooltip())
print(" - Edit kernel/trace/rv/Kconfig") print(monitor.fill_monitor_tooltip())


@ -14,50 +14,83 @@ import os
class dot2k(Dot2c): class dot2k(Dot2c):
monitor_types = { "global" : 1, "per_cpu" : 2, "per_task" : 3 } monitor_types = { "global" : 1, "per_cpu" : 2, "per_task" : 3 }
monitor_templates_dir = "dot2k/rv_templates/" monitor_templates_dir = "dot2/dot2k_templates/"
rv_dir = "kernel/trace/rv"
monitor_type = "per_cpu" monitor_type = "per_cpu"
def __init__(self, file_path, MonitorType): def __init__(self, file_path, MonitorType, extra_params={}):
super().__init__(file_path) super().__init__(file_path, extra_params.get("model_name"))
self.monitor_type = self.monitor_types.get(MonitorType) self.monitor_type = self.monitor_types.get(MonitorType)
if self.monitor_type == None: if self.monitor_type is None:
raise Exception("Unknown monitor type: %s" % MonitorType) raise ValueError("Unknown monitor type: %s" % MonitorType)
self.monitor_type = MonitorType self.monitor_type = MonitorType
self.__fill_rv_templates_dir() self.__fill_rv_templates_dir()
self.main_c = self.__open_file(self.monitor_templates_dir + "main_" + MonitorType + ".c") self.main_c = self.__read_file(self.monitor_templates_dir + "main.c")
self.trace_h = self.__read_file(self.monitor_templates_dir + "trace.h")
self.kconfig = self.__read_file(self.monitor_templates_dir + "Kconfig")
self.enum_suffix = "_%s" % self.name self.enum_suffix = "_%s" % self.name
self.description = extra_params.get("description", self.name) or "auto-generated"
self.auto_patch = extra_params.get("auto_patch")
if self.auto_patch:
self.__fill_rv_kernel_dir()
def __fill_rv_templates_dir(self): def __fill_rv_templates_dir(self):
if os.path.exists(self.monitor_templates_dir) == True: if os.path.exists(self.monitor_templates_dir):
return return
if platform.system() != "Linux": if platform.system() != "Linux":
raise Exception("I can only run on Linux.") raise OSError("I can only run on Linux.")
kernel_path = "/lib/modules/%s/build/tools/verification/dot2/dot2k_templates/" % (platform.release()) kernel_path = "/lib/modules/%s/build/tools/verification/dot2/dot2k_templates/" % (platform.release())
if os.path.exists(kernel_path) == True: if os.path.exists(kernel_path):
self.monitor_templates_dir = kernel_path self.monitor_templates_dir = kernel_path
return return
if os.path.exists("/usr/share/dot2/dot2k_templates/") == True: if os.path.exists("/usr/share/dot2/dot2k_templates/"):
self.monitor_templates_dir = "/usr/share/dot2/dot2k_templates/" self.monitor_templates_dir = "/usr/share/dot2/dot2k_templates/"
return return
raise Exception("Could not find the template directory, do you have the kernel source installed?") raise FileNotFoundError("Could not find the template directory, do you have the kernel source installed?")
def __fill_rv_kernel_dir(self):
def __open_file(self, path): # first try if we are running in the kernel tree root
if os.path.exists(self.rv_dir):
return
# offset if we are running inside the kernel tree from verification/dot2
kernel_path = os.path.join("../..", self.rv_dir)
if os.path.exists(kernel_path):
self.rv_dir = kernel_path
return
if platform.system() != "Linux":
raise OSError("I can only run on Linux.")
kernel_path = os.path.join("/lib/modules/%s/build" % platform.release(), self.rv_dir)
# if the current kernel is from a distro this may not be a full kernel tree
# verify that one of the files we are going to modify is available
if os.path.exists(os.path.join(kernel_path, "rv_trace.h")):
self.rv_dir = kernel_path
return
raise FileNotFoundError("Could not find the rv directory, do you have the kernel source installed?")
def __read_file(self, path):
try: try:
fd = open(path) fd = open(path, 'r')
except OSError: except OSError:
raise Exception("Cannot open the file: %s" % path) raise Exception("Cannot open the file: %s" % path)
content = fd.read() content = fd.read()
fd.close()
return content return content
def __buff_to_string(self, buff): def __buff_to_string(self, buff):
@ -69,16 +102,26 @@ class dot2k(Dot2c):
# cut off the last \n # cut off the last \n
return string[:-1] return string[:-1]
def fill_monitor_type(self):
return self.monitor_type.upper()
def fill_tracepoint_handlers_skel(self): def fill_tracepoint_handlers_skel(self):
buff = [] buff = []
for event in self.events: for event in self.events:
buff.append("static void handle_%s(void *data, /* XXX: fill header */)" % event) buff.append("static void handle_%s(void *data, /* XXX: fill header */)" % event)
buff.append("{") buff.append("{")
handle = "handle_event"
if self.is_start_event(event):
buff.append("\t/* XXX: validate that this event always leads to the initial state */")
handle = "handle_start_event"
elif self.is_start_run_event(event):
buff.append("\t/* XXX: validate that this event is only valid in the initial state */")
handle = "handle_start_run_event"
if self.monitor_type == "per_task": if self.monitor_type == "per_task":
buff.append("\tstruct task_struct *p = /* XXX: how do I get p? */;"); buff.append("\tstruct task_struct *p = /* XXX: how do I get p? */;");
buff.append("\tda_handle_event_%s(p, %s%s);" % (self.name, event, self.enum_suffix)); buff.append("\tda_%s_%s(p, %s%s);" % (handle, self.name, event, self.enum_suffix));
else: else:
buff.append("\tda_handle_event_%s(%s%s);" % (self.name, event, self.enum_suffix)); buff.append("\tda_%s_%s(%s%s);" % (handle, self.name, event, self.enum_suffix));
buff.append("}") buff.append("}")
buff.append("") buff.append("")
return self.__buff_to_string(buff) return self.__buff_to_string(buff)
@ -97,18 +140,21 @@ class dot2k(Dot2c):
def fill_main_c(self): def fill_main_c(self):
main_c = self.main_c main_c = self.main_c
monitor_type = self.fill_monitor_type()
min_type = self.get_minimun_type() min_type = self.get_minimun_type()
nr_events = self.events.__len__() nr_events = len(self.events)
tracepoint_handlers = self.fill_tracepoint_handlers_skel() tracepoint_handlers = self.fill_tracepoint_handlers_skel()
tracepoint_attach = self.fill_tracepoint_attach_probe() tracepoint_attach = self.fill_tracepoint_attach_probe()
tracepoint_detach = self.fill_tracepoint_detach_helper() tracepoint_detach = self.fill_tracepoint_detach_helper()
main_c = main_c.replace("MIN_TYPE", min_type) main_c = main_c.replace("%%MONITOR_TYPE%%", monitor_type)
main_c = main_c.replace("MODEL_NAME", self.name) main_c = main_c.replace("%%MIN_TYPE%%", min_type)
main_c = main_c.replace("NR_EVENTS", str(nr_events)) main_c = main_c.replace("%%MODEL_NAME%%", self.name)
main_c = main_c.replace("TRACEPOINT_HANDLERS_SKEL", tracepoint_handlers) main_c = main_c.replace("%%NR_EVENTS%%", str(nr_events))
main_c = main_c.replace("TRACEPOINT_ATTACH", tracepoint_attach) main_c = main_c.replace("%%TRACEPOINT_HANDLERS_SKEL%%", tracepoint_handlers)
main_c = main_c.replace("TRACEPOINT_DETACH", tracepoint_detach) main_c = main_c.replace("%%TRACEPOINT_ATTACH%%", tracepoint_attach)
main_c = main_c.replace("%%TRACEPOINT_DETACH%%", tracepoint_detach)
main_c = main_c.replace("%%DESCRIPTION%%", self.description)
return main_c return main_c
@ -137,30 +183,141 @@ class dot2k(Dot2c):
return self.__buff_to_string(buff) return self.__buff_to_string(buff)
def fill_monitor_class_type(self):
if self.monitor_type == "per_task":
return "DA_MON_EVENTS_ID"
return "DA_MON_EVENTS_IMPLICIT"
def fill_monitor_class(self):
if self.monitor_type == "per_task":
return "da_monitor_id"
return "da_monitor"
def fill_tracepoint_args_skel(self, tp_type):
buff = []
tp_args_event = [
("char *", "state"),
("char *", "event"),
("char *", "next_state"),
("bool ", "final_state"),
]
tp_args_error = [
("char *", "state"),
("char *", "event"),
]
tp_args_id = ("int ", "id")
tp_args = tp_args_event if tp_type == "event" else tp_args_error
if self.monitor_type == "per_task":
tp_args.insert(0, tp_args_id)
tp_proto_c = ", ".join([a+b for a,b in tp_args])
tp_args_c = ", ".join([b for a,b in tp_args])
buff.append(" TP_PROTO(%s)," % tp_proto_c)
buff.append(" TP_ARGS(%s)" % tp_args_c)
return self.__buff_to_string(buff)
def fill_trace_h(self):
trace_h = self.trace_h
monitor_class = self.fill_monitor_class()
monitor_class_type = self.fill_monitor_class_type()
tracepoint_args_skel_event = self.fill_tracepoint_args_skel("event")
tracepoint_args_skel_error = self.fill_tracepoint_args_skel("error")
trace_h = trace_h.replace("%%MODEL_NAME%%", self.name)
trace_h = trace_h.replace("%%MODEL_NAME_UP%%", self.name.upper())
trace_h = trace_h.replace("%%MONITOR_CLASS%%", monitor_class)
trace_h = trace_h.replace("%%MONITOR_CLASS_TYPE%%", monitor_class_type)
trace_h = trace_h.replace("%%TRACEPOINT_ARGS_SKEL_EVENT%%", tracepoint_args_skel_event)
trace_h = trace_h.replace("%%TRACEPOINT_ARGS_SKEL_ERROR%%", tracepoint_args_skel_error)
return trace_h
def fill_kconfig(self):
kconfig = self.kconfig
monitor_class_type = self.fill_monitor_class_type()
kconfig = kconfig.replace("%%MODEL_NAME%%", self.name)
kconfig = kconfig.replace("%%MODEL_NAME_UP%%", self.name.upper())
kconfig = kconfig.replace("%%MONITOR_CLASS_TYPE%%", monitor_class_type)
kconfig = kconfig.replace("%%DESCRIPTION%%", self.description)
return kconfig
def __patch_file(self, file, marker, line):
file_to_patch = os.path.join(self.rv_dir, file)
content = self.__read_file(file_to_patch)
content = content.replace(marker, line + "\n" + marker)
self.__write_file(file_to_patch, content)
def fill_tracepoint_tooltip(self):
monitor_class_type = self.fill_monitor_class_type()
if self.auto_patch:
self.__patch_file("rv_trace.h",
"// Add new monitors based on CONFIG_%s here" % monitor_class_type,
"#include <monitors/%s/%s_trace.h>" % (self.name, self.name))
return " - Patching %s/rv_trace.h, double check the result" % self.rv_dir
return """ - Edit %s/rv_trace.h:
Add this line where other tracepoints are included and %s is defined:
#include <monitors/%s/%s_trace.h>
""" % (self.rv_dir, monitor_class_type, self.name, self.name)
def fill_kconfig_tooltip(self):
if self.auto_patch:
self.__patch_file("Kconfig",
"# Add new monitors here",
"source \"kernel/trace/rv/monitors/%s/Kconfig\"" % (self.name))
return " - Patching %s/Kconfig, double check the result" % self.rv_dir
return """ - Edit %s/Kconfig:
Add this line where other monitors are included:
source \"kernel/trace/rv/monitors/%s/Kconfig\"
""" % (self.rv_dir, self.name)
def fill_makefile_tooltip(self):
name = self.name
name_up = name.upper()
if self.auto_patch:
self.__patch_file("Makefile",
"# Add new monitors here",
"obj-$(CONFIG_RV_MON_%s) += monitors/%s/%s.o" % (name_up, name, name))
return " - Patching %s/Makefile, double check the result" % self.rv_dir
return """ - Edit %s/Makefile:
Add this line where other monitors are included:
obj-$(CONFIG_RV_MON_%s) += monitors/%s/%s.o
""" % (self.rv_dir, name_up, name, name)
def fill_monitor_tooltip(self):
if self.auto_patch:
return " - Monitor created in %s/monitors/%s" % (self.rv_dir, self. name)
return " - Move %s/ to the kernel's monitor directory (%s/monitors)" % (self.name, self.rv_dir)
def __create_directory(self): def __create_directory(self):
path = self.name
if self.auto_patch:
path = os.path.join(self.rv_dir, "monitors", path)
try: try:
os.mkdir(self.name) os.mkdir(path)
except FileExistsError: except FileExistsError:
return return
except: except:
print("Fail creating the output dir: %s" % self.name) print("Fail creating the output dir: %s" % self.name)
def __create_file(self, file_name, content): def __write_file(self, file_name, content):
path = "%s/%s" % (self.name, file_name)
try: try:
file = open(path, 'w') file = open(file_name, 'w')
except FileExistsError:
return
except: except:
print("Fail creating file: %s" % path) print("Fail writing to file: %s" % file_name)
file.write(content) file.write(content)
file.close() file.close()
def __create_file(self, file_name, content):
path = "%s/%s" % (self.name, file_name)
if self.auto_patch:
path = os.path.join(self.rv_dir, "monitors", path)
self.__write_file(path, content)
def __get_main_name(self): def __get_main_name(self):
path = "%s/%s" % (self.name, "main.c") path = "%s/%s" % (self.name, "main.c")
if os.path.exists(path) == False: if not os.path.exists(path):
return "main.c" return "main.c"
return "__main.c" return "__main.c"
@ -175,3 +332,10 @@ class dot2k(Dot2c):
path = "%s.h" % self.name path = "%s.h" % self.name
self.__create_file(path, model_h) self.__create_file(path, model_h)
trace_h = self.fill_trace_h()
path = "%s_trace.h" % self.name
self.__create_file(path, trace_h)
kconfig = self.fill_kconfig()
self.__create_file("Kconfig", kconfig)


@ -0,0 +1,6 @@
config RV_MON_%%MODEL_NAME_UP%%
depends on RV
select %%MONITOR_CLASS_TYPE%%
bool "%%MODEL_NAME%% monitor"
help
%%DESCRIPTION%%
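
For orientation, a hypothetical global monitor named "wip" generated without a -D description would render the template above roughly as follows (DA_MON_EVENTS_IMPLICIT comes from fill_monitor_class_type() for non-per_task monitors, and the help text falls back to "auto-generated"):

config RV_MON_WIP
	depends on RV
	select DA_MON_EVENTS_IMPLICIT
	bool "wip monitor"
	help
	  auto-generated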


@ -0,0 +1,91 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/tracepoint.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rv.h>
#include <rv/instrumentation.h>
#include <rv/da_monitor.h>
#define MODULE_NAME "%%MODEL_NAME%%"
/*
* XXX: include required tracepoint headers, e.g.,
* #include <trace/events/sched.h>
*/
#include <rv_trace.h>
/*
* This is the self-generated part of the monitor. Generally, there is no need
* to touch this section.
*/
#include "%%MODEL_NAME%%.h"
/*
* Declare the deterministic automata monitor.
*
* The rv monitor reference is needed for the monitor declaration.
*/
static struct rv_monitor rv_%%MODEL_NAME%%;
DECLARE_DA_MON_%%MONITOR_TYPE%%(%%MODEL_NAME%%, %%MIN_TYPE%%);
/*
* This is the instrumentation part of the monitor.
*
* This is the section where manual work is required. Here the kernel events
* are translated into the model's events.
*
*/
%%TRACEPOINT_HANDLERS_SKEL%%
static int enable_%%MODEL_NAME%%(void)
{
int retval;
retval = da_monitor_init_%%MODEL_NAME%%();
if (retval)
return retval;
%%TRACEPOINT_ATTACH%%
return 0;
}
static void disable_%%MODEL_NAME%%(void)
{
rv_%%MODEL_NAME%%.enabled = 0;
%%TRACEPOINT_DETACH%%
da_monitor_destroy_%%MODEL_NAME%%();
}
/*
* This is the monitor register section.
*/
static struct rv_monitor rv_%%MODEL_NAME%% = {
.name = "%%MODEL_NAME%%",
.description = "%%DESCRIPTION%%",
.enable = enable_%%MODEL_NAME%%,
.disable = disable_%%MODEL_NAME%%,
.reset = da_monitor_reset_all_%%MODEL_NAME%%,
.enabled = 0,
};
static int __init register_%%MODEL_NAME%%(void)
{
rv_register_monitor(&rv_%%MODEL_NAME%%);
return 0;
}
static void __exit unregister_%%MODEL_NAME%%(void)
{
rv_unregister_monitor(&rv_%%MODEL_NAME%%);
}
module_init(register_%%MODEL_NAME%%);
module_exit(unregister_%%MODEL_NAME%%);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("dot2k: auto-generated");
MODULE_DESCRIPTION("%%MODEL_NAME%%: %%DESCRIPTION%%");


@ -1,91 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/tracepoint.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rv.h>
#include <rv/instrumentation.h>
#include <rv/da_monitor.h>
#define MODULE_NAME "MODEL_NAME"
/*
* XXX: include required tracepoint headers, e.g.,
* #include <trace/events/sched.h>
*/
#include <trace/events/rv.h>
/*
* This is the self-generated part of the monitor. Generally, there is no need
* to touch this section.
*/
#include "MODEL_NAME.h"
/*
* Declare the deterministic automata monitor.
*
* The rv monitor reference is needed for the monitor declaration.
*/
static struct rv_monitor rv_MODEL_NAME;
DECLARE_DA_MON_GLOBAL(MODEL_NAME, MIN_TYPE);
/*
* This is the instrumentation part of the monitor.
*
* This is the section where manual work is required. Here the kernel events
* are translated into model's event.
*
*/
TRACEPOINT_HANDLERS_SKEL
static int enable_MODEL_NAME(void)
{
int retval;
retval = da_monitor_init_MODEL_NAME();
if (retval)
return retval;
TRACEPOINT_ATTACH
return 0;
}
static void disable_MODEL_NAME(void)
{
rv_MODEL_NAME.enabled = 0;
TRACEPOINT_DETACH
da_monitor_destroy_MODEL_NAME();
}
/*
* This is the monitor register section.
*/
static struct rv_monitor rv_MODEL_NAME = {
.name = "MODEL_NAME",
.description = "auto-generated MODEL_NAME",
.enable = enable_MODEL_NAME,
.disable = disable_MODEL_NAME,
.reset = da_monitor_reset_all_MODEL_NAME,
.enabled = 0,
};
static int __init register_MODEL_NAME(void)
{
rv_register_monitor(&rv_MODEL_NAME);
return 0;
}
static void __exit unregister_MODEL_NAME(void)
{
rv_unregister_monitor(&rv_MODEL_NAME);
}
module_init(register_MODEL_NAME);
module_exit(unregister_MODEL_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("dot2k: auto-generated");
MODULE_DESCRIPTION("MODEL_NAME");

View File

@ -1,91 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/tracepoint.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rv.h>
#include <rv/instrumentation.h>
#include <rv/da_monitor.h>
#define MODULE_NAME "MODEL_NAME"
/*
* XXX: include required tracepoint headers, e.g.,
* #include <trace/events/sched.h>
*/
#include <trace/events/rv.h>
/*
* This is the self-generated part of the monitor. Generally, there is no need
* to touch this section.
*/
#include "MODEL_NAME.h"
/*
* Declare the deterministic automata monitor.
*
* The rv monitor reference is needed for the monitor declaration.
*/
static struct rv_monitor rv_MODEL_NAME;
DECLARE_DA_MON_PER_CPU(MODEL_NAME, MIN_TYPE);
/*
* This is the instrumentation part of the monitor.
*
* This is the section where manual work is required. Here the kernel events
* are translated into the model's events.
*
*/
TRACEPOINT_HANDLERS_SKEL
static int enable_MODEL_NAME(void)
{
int retval;
retval = da_monitor_init_MODEL_NAME();
if (retval)
return retval;
TRACEPOINT_ATTACH
return 0;
}
static void disable_MODEL_NAME(void)
{
rv_MODEL_NAME.enabled = 0;
TRACEPOINT_DETACH
da_monitor_destroy_MODEL_NAME();
}
/*
* This is the monitor register section.
*/
static struct rv_monitor rv_MODEL_NAME = {
.name = "MODEL_NAME",
.description = "auto-generated MODEL_NAME",
.enable = enable_MODEL_NAME,
.disable = disable_MODEL_NAME,
.reset = da_monitor_reset_all_MODEL_NAME,
.enabled = 0,
};
static int __init register_MODEL_NAME(void)
{
rv_register_monitor(&rv_MODEL_NAME);
return 0;
}
static void __exit unregister_MODEL_NAME(void)
{
rv_unregister_monitor(&rv_MODEL_NAME);
}
module_init(register_MODEL_NAME);
module_exit(unregister_MODEL_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("dot2k: auto-generated");
MODULE_DESCRIPTION("MODEL_NAME");

View File

@ -1,91 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/tracepoint.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rv.h>
#include <rv/instrumentation.h>
#include <rv/da_monitor.h>
#define MODULE_NAME "MODEL_NAME"
/*
* XXX: include required tracepoint headers, e.g.,
* #include <trace/events/sched.h>
*/
#include <trace/events/rv.h>
/*
* This is the self-generated part of the monitor. Generally, there is no need
* to touch this section.
*/
#include "MODEL_NAME.h"
/*
* Declare the deterministic automata monitor.
*
* The rv monitor reference is needed for the monitor declaration.
*/
static struct rv_monitor rv_MODEL_NAME;
DECLARE_DA_MON_PER_TASK(MODEL_NAME, MIN_TYPE);
/*
* This is the instrumentation part of the monitor.
*
* This is the section where manual work is required. Here the kernel events
* are translated into the model's events.
*
*/
TRACEPOINT_HANDLERS_SKEL
static int enable_MODEL_NAME(void)
{
int retval;
retval = da_monitor_init_MODEL_NAME();
if (retval)
return retval;
TRACEPOINT_ATTACH
return 0;
}
static void disable_MODEL_NAME(void)
{
rv_MODEL_NAME.enabled = 0;
TRACEPOINT_DETACH
da_monitor_destroy_MODEL_NAME();
}
/*
* This is the monitor register section.
*/
static struct rv_monitor rv_MODEL_NAME = {
.name = "MODEL_NAME",
.description = "auto-generated MODEL_NAME",
.enable = enable_MODEL_NAME,
.disable = disable_MODEL_NAME,
.reset = da_monitor_reset_all_MODEL_NAME,
.enabled = 0,
};
static int __init register_MODEL_NAME(void)
{
rv_register_monitor(&rv_MODEL_NAME);
return 0;
}
static void __exit unregister_MODEL_NAME(void)
{
rv_unregister_monitor(&rv_MODEL_NAME);
}
module_init(register_MODEL_NAME);
module_exit(unregister_MODEL_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("dot2k: auto-generated");
MODULE_DESCRIPTION("MODEL_NAME");

View File

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Snippet to be included in rv_trace.h
*/
#ifdef CONFIG_RV_MON_%%MODEL_NAME_UP%%
DEFINE_EVENT(event_%%MONITOR_CLASS%%, event_%%MODEL_NAME%%,
%%TRACEPOINT_ARGS_SKEL_EVENT%%);
DEFINE_EVENT(error_%%MONITOR_CLASS%%, error_%%MODEL_NAME%%,
%%TRACEPOINT_ARGS_SKEL_ERROR%%);
#endif /* CONFIG_RV_MON_%%MODEL_NAME_UP%% */
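For an illustrative global monitor named "wip", whose monitor class is da_monitor, the snippet would expand along these lines, assuming the event_da_monitor/error_da_monitor event classes declared by the in-tree rv_trace.h:

#ifdef CONFIG_RV_MON_WIP
DEFINE_EVENT(event_da_monitor, event_wip,
	     TP_PROTO(char *state, char *event, char *next_state, bool final_state),
	     TP_ARGS(state, event, next_state, final_state));

DEFINE_EVENT(error_da_monitor, error_wip,
	     TP_PROTO(char *state, char *event),
	     TP_ARGS(state, event));
#endif /* CONFIG_RV_MON_WIP */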