Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace.git

Stephen Rothwell 2024-12-20 13:32:54 +11:00
commit ae884f86d7
5 changed files with 149 additions and 192 deletions
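Note: the diffs below replace open-coded mutex_lock()/mutex_unlock() pairs (and their goto-based unlock paths) with the kernel's scope-based cleanup helpers guard() and scoped_guard(), which drop the lock automatically when the enclosing scope is left. As a rough, hedged illustration of the idea only, here is a minimal user-space sketch built on the compiler's cleanup attribute, the same mechanism <linux/cleanup.h> is built on; the names unlock_mutex and MUTEX_GUARD are hypothetical and are not the kernel's macros.

/* Hypothetical user-space analogue of guard(mutex) from <linux/cleanup.h>.
 * A variable declared with __attribute__((cleanup(fn))) has fn() run when
 * it goes out of scope, so every return path unlocks automatically.
 */
#include <pthread.h>
#include <stdio.h>

static void unlock_mutex(pthread_mutex_t **m)
{
	if (*m)
		pthread_mutex_unlock(*m);
}

#define MUTEX_GUARD(name, lock) \
	pthread_mutex_t *name __attribute__((cleanup(unlock_mutex), unused)) = \
		(pthread_mutex_lock(lock), (lock))

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table_entries;

static int add_entry(int v)
{
	MUTEX_GUARD(g, &table_lock);	/* locked here, unlocked at any return */

	if (v < 0)
		return -1;		/* early return still drops the lock */

	table_entries += v;
	return 0;
}

int main(void)
{
	printf("add_entry(3) = %d\n", add_entry(3));
	printf("add_entry(-1) = %d\n", add_entry(-1));
	return 0;
}

In the kernel, the equivalents used by these patches are guard(mutex)(&some_mutex) for whole-function scope and scoped_guard(mutex, &some_mutex) { ... } for a bounded block, which is exactly what the hunks below switch to.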

kernel/kprobes.c

@@ -39,6 +39,7 @@
#include <linux/static_call.h>
#include <linux/perf_event.h>
#include <linux/execmem.h>
+#include <linux/cleanup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
@@ -140,10 +141,9 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c);
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
struct kprobe_insn_page *kip;
-kprobe_opcode_t *slot = NULL;
/* Since the slot array is not protected by rcu, we need a mutex */
-mutex_lock(&c->mutex);
+guard(mutex)(&c->mutex);
retry:
rcu_read_lock();
list_for_each_entry_rcu(kip, &c->pages, list) {
@@ -154,9 +154,8 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
if (kip->slot_used[i] == SLOT_CLEAN) {
kip->slot_used[i] = SLOT_USED;
kip->nused++;
-slot = kip->insns + (i * c->insn_size);
rcu_read_unlock();
-goto out;
+return kip->insns + (i * c->insn_size);
}
}
/* kip->nused is broken. Fix it. */
@@ -173,12 +172,12 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
/* All out of space. Need to allocate a new page. */
kip = kmalloc(struct_size(kip, slot_used, slots_per_page(c)), GFP_KERNEL);
if (!kip)
-goto out;
+return NULL;
kip->insns = c->alloc();
if (!kip->insns) {
kfree(kip);
-goto out;
+return NULL;
}
INIT_LIST_HEAD(&kip->list);
memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
@@ -187,14 +186,12 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
kip->ngarbage = 0;
kip->cache = c;
list_add_rcu(&kip->list, &c->pages);
-slot = kip->insns;
/* Record the perf ksymbol register event after adding the page */
perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
PAGE_SIZE, false, c->sym);
-out:
-mutex_unlock(&c->mutex);
-return slot;
+return kip->insns;
}
/* Return true if all garbages are collected, otherwise false. */
@@ -255,7 +252,7 @@ void __free_insn_slot(struct kprobe_insn_cache *c,
struct kprobe_insn_page *kip;
long idx;
-mutex_lock(&c->mutex);
+guard(mutex)(&c->mutex);
rcu_read_lock();
list_for_each_entry_rcu(kip, &c->pages, list) {
idx = ((long)slot - (long)kip->insns) /
@@ -281,7 +278,6 @@ out:
collect_one_slot(kip, idx);
}
}
-mutex_unlock(&c->mutex);
}
/*
@@ -637,10 +633,9 @@ static void kprobe_optimizer(struct work_struct *work)
mutex_unlock(&kprobe_mutex);
}
-/* Wait for completing optimization and unoptimization */
-void wait_for_kprobe_optimizer(void)
+static void wait_for_kprobe_optimizer_locked(void)
{
-mutex_lock(&kprobe_mutex);
+lockdep_assert_held(&kprobe_mutex);
while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
mutex_unlock(&kprobe_mutex);
@@ -652,8 +647,14 @@ void wait_for_kprobe_optimizer(void)
mutex_lock(&kprobe_mutex);
}
-mutex_unlock(&kprobe_mutex);
+}
+/* Wait for completing optimization and unoptimization */
+void wait_for_kprobe_optimizer(void)
+{
+guard(mutex)(&kprobe_mutex);
+wait_for_kprobe_optimizer_locked();
}
bool optprobe_queued_unopt(struct optimized_kprobe *op)
@@ -883,10 +884,10 @@ static void optimize_all_kprobes(void)
struct kprobe *p;
unsigned int i;
-mutex_lock(&kprobe_mutex);
+guard(mutex)(&kprobe_mutex);
/* If optimization is already allowed, just return. */
if (kprobes_allow_optimization)
-goto out;
+return;
cpus_read_lock();
kprobes_allow_optimization = true;
@@ -898,8 +899,6 @@ static void optimize_all_kprobes(void)
}
cpus_read_unlock();
pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
-out:
-mutex_unlock(&kprobe_mutex);
}
#ifdef CONFIG_SYSCTL
@@ -909,12 +908,10 @@ static void unoptimize_all_kprobes(void)
struct kprobe *p;
unsigned int i;
-mutex_lock(&kprobe_mutex);
+guard(mutex)(&kprobe_mutex);
/* If optimization is already prohibited, just return. */
-if (!kprobes_allow_optimization) {
-mutex_unlock(&kprobe_mutex);
+if (!kprobes_allow_optimization)
return;
-}
cpus_read_lock();
kprobes_allow_optimization = false;
@@ -926,10 +923,8 @@ static void unoptimize_all_kprobes(void)
}
}
cpus_read_unlock();
-mutex_unlock(&kprobe_mutex);
/* Wait for unoptimizing completion. */
-wait_for_kprobe_optimizer();
+wait_for_kprobe_optimizer_locked();
pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
}
@@ -941,7 +936,7 @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table,
{
int ret;
-mutex_lock(&kprobe_sysctl_mutex);
+guard(mutex)(&kprobe_sysctl_mutex);
sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
@@ -949,7 +944,6 @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table,
optimize_all_kprobes();
else
unoptimize_all_kprobes();
-mutex_unlock(&kprobe_sysctl_mutex);
return ret;
}
@@ -1024,7 +1018,8 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
#define __arm_kprobe(p) arch_arm_kprobe(p)
#define __disarm_kprobe(p, o) arch_disarm_kprobe(p)
#define kprobe_disarmed(p) kprobe_disabled(p)
-#define wait_for_kprobe_optimizer() do {} while (0)
+#define wait_for_kprobe_optimizer_locked() \
+	lockdep_assert_held(&kprobe_mutex)
static int reuse_unused_kprobe(struct kprobe *ap)
{
@@ -1488,6 +1483,7 @@ invalid:
static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
bool on_func_entry;
+
return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
}
@@ -1522,14 +1518,12 @@ valid:
*/
static inline int warn_kprobe_rereg(struct kprobe *p)
{
-int ret = 0;
-mutex_lock(&kprobe_mutex);
+guard(mutex)(&kprobe_mutex);
if (WARN_ON_ONCE(__get_valid_kprobe(p)))
-ret = -EINVAL;
-mutex_unlock(&kprobe_mutex);
-return ret;
+return -EINVAL;
+return 0;
}
static int check_ftrace_location(struct kprobe *p)
@@ -1566,16 +1560,25 @@ static int check_kprobe_address_safe(struct kprobe *p,
if (ret)
return ret;
jump_label_lock();
-preempt_disable();
/* Ensure the address is in a text area, and find a module if exists. */
*probed_mod = NULL;
if (!core_kernel_text((unsigned long) p->addr)) {
+guard(preempt)();
*probed_mod = __module_text_address((unsigned long) p->addr);
if (!(*probed_mod)) {
ret = -EINVAL;
goto out;
}
+/*
+ * We must hold a refcount of the probed module while updating
+ * its code to prohibit unexpected unloading.
+ */
+if (unlikely(!try_module_get(*probed_mod))) {
+ret = -ENOENT;
+goto out;
+}
}
/* Ensure it is not in reserved area. */
if (in_gate_area_no_mm((unsigned long) p->addr) ||
@@ -1584,21 +1587,13 @@ static int check_kprobe_address_safe(struct kprobe *p,
static_call_text_reserved(p->addr, p->addr) ||
find_bug((unsigned long)p->addr) ||
is_cfi_preamble_symbol((unsigned long)p->addr)) {
+module_put(*probed_mod);
ret = -EINVAL;
goto out;
}
/* Get module refcount and reject __init functions for loaded modules. */
if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
-/*
- * We must hold a refcount of the probed module while updating
- * its code to prohibit unexpected unloading.
- */
-if (unlikely(!try_module_get(*probed_mod))) {
-ret = -ENOENT;
-goto out;
-}
/*
* If the module freed '.init.text', we couldn't insert
* kprobes in there.
@@ -1606,27 +1601,62 @@ static int check_kprobe_address_safe(struct kprobe *p,
if (within_module_init((unsigned long)p->addr, *probed_mod) &&
!module_is_coming(*probed_mod)) {
module_put(*probed_mod);
+*probed_mod = NULL;
ret = -ENOENT;
}
}
out:
-preempt_enable();
jump_label_unlock();
return ret;
}
-int register_kprobe(struct kprobe *p)
+static int __register_kprobe(struct kprobe *p)
{
int ret;
struct kprobe *old_p;
+guard(mutex)(&kprobe_mutex);
+old_p = get_kprobe(p->addr);
+if (old_p)
+/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
+return register_aggr_kprobe(old_p, p);
+cpus_read_lock();
+/* Prevent text modification */
+mutex_lock(&text_mutex);
+ret = prepare_kprobe(p);
+mutex_unlock(&text_mutex);
+cpus_read_unlock();
+if (ret)
+return ret;
+INIT_HLIST_NODE(&p->hlist);
+hlist_add_head_rcu(&p->hlist,
+&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
+if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
+ret = arm_kprobe(p);
+if (ret) {
+hlist_del_rcu(&p->hlist);
+synchronize_rcu();
+return ret;
+}
+}
+/* Try to optimize kprobe */
+try_to_optimize_kprobe(p);
+return 0;
+}
+int register_kprobe(struct kprobe *p)
+{
+int ret;
struct module *probed_mod;
kprobe_opcode_t *addr;
bool on_func_entry;
-/* Adjust probe address from symbol */
+/* Canonicalize probe address from symbol */
addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
if (IS_ERR(addr))
return PTR_ERR(addr);
@@ -1638,6 +1668,8 @@ int register_kprobe(struct kprobe *p)
/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
p->flags &= KPROBE_FLAG_DISABLED;
+if (on_func_entry)
+p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
p->nmissed = 0;
INIT_LIST_HEAD(&p->list);
@@ -1645,44 +1677,7 @@ int register_kprobe(struct kprobe *p)
if (ret)
return ret;
-mutex_lock(&kprobe_mutex);
+ret = __register_kprobe(p);
-if (on_func_entry)
-p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
-old_p = get_kprobe(p->addr);
-if (old_p) {
-/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
-ret = register_aggr_kprobe(old_p, p);
-goto out;
-}
-cpus_read_lock();
-/* Prevent text modification */
-mutex_lock(&text_mutex);
-ret = prepare_kprobe(p);
-mutex_unlock(&text_mutex);
-cpus_read_unlock();
-if (ret)
-goto out;
-INIT_HLIST_NODE(&p->hlist);
-hlist_add_head_rcu(&p->hlist,
-&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
-if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
-ret = arm_kprobe(p);
-if (ret) {
-hlist_del_rcu(&p->hlist);
-synchronize_rcu();
-goto out;
-}
-}
-/* Try to optimize kprobe */
-try_to_optimize_kprobe(p);
-out:
-mutex_unlock(&kprobe_mutex);
if (probed_mod)
module_put(probed_mod);
@@ -1858,12 +1853,11 @@ void unregister_kprobes(struct kprobe **kps, int num)
if (num <= 0)
return;
-mutex_lock(&kprobe_mutex);
+scoped_guard(mutex, &kprobe_mutex) {
for (i = 0; i < num; i++)
if (__unregister_kprobe_top(kps[i]) < 0)
kps[i]->addr = NULL;
-mutex_unlock(&kprobe_mutex);
+}
synchronize_rcu();
for (i = 0; i < num; i++)
if (kps[i]->addr)
@@ -2302,8 +2296,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
if (num <= 0)
return;
-mutex_lock(&kprobe_mutex);
for (i = 0; i < num; i++) {
+guard(mutex)(&kprobe_mutex);
if (__unregister_kprobe_top(&rps[i]->kp) < 0)
rps[i]->kp.addr = NULL;
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
@@ -2312,7 +2307,6 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
rcu_assign_pointer(rps[i]->rph->rp, NULL);
#endif
}
-mutex_unlock(&kprobe_mutex);
synchronize_rcu();
for (i = 0; i < num; i++) {
@@ -2393,18 +2387,14 @@ static void kill_kprobe(struct kprobe *p)
/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
-int ret = 0;
struct kprobe *p;
-mutex_lock(&kprobe_mutex);
+guard(mutex)(&kprobe_mutex);
/* Disable this kprobe */
p = __disable_kprobe(kp);
-if (IS_ERR(p))
-ret = PTR_ERR(p);
-mutex_unlock(&kprobe_mutex);
-return ret;
+return IS_ERR(p) ? PTR_ERR(p) : 0;
}
EXPORT_SYMBOL_GPL(disable_kprobe);
@@ -2414,20 +2404,16 @@ int enable_kprobe(struct kprobe *kp)
int ret = 0;
struct kprobe *p;
-mutex_lock(&kprobe_mutex);
+guard(mutex)(&kprobe_mutex);
/* Check whether specified probe is valid. */
p = __get_valid_kprobe(kp);
-if (unlikely(p == NULL)) {
-ret = -EINVAL;
-goto out;
-}
+if (unlikely(p == NULL))
+return -EINVAL;
-if (kprobe_gone(kp)) {
+if (kprobe_gone(kp))
/* This kprobe has gone, we couldn't enable it. */
-ret = -EINVAL;
-goto out;
-}
+return -EINVAL;
if (p != kp)
kp->flags &= ~KPROBE_FLAG_DISABLED;
@@ -2441,8 +2427,6 @@ int enable_kprobe(struct kprobe *kp)
kp->flags |= KPROBE_FLAG_DISABLED;
}
}
-out:
-mutex_unlock(&kprobe_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
@@ -2630,11 +2614,11 @@ static int kprobes_module_callback(struct notifier_block *nb,
unsigned int i;
int checkcore = (val == MODULE_STATE_GOING);
-if (val == MODULE_STATE_COMING) {
-mutex_lock(&kprobe_mutex);
+guard(mutex)(&kprobe_mutex);
+if (val == MODULE_STATE_COMING)
add_module_kprobe_blacklist(mod);
-mutex_unlock(&kprobe_mutex);
-}
if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
return NOTIFY_DONE;
@@ -2644,7 +2628,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
* notified, only '.init.text' section would be freed. We need to
* disable kprobes which have been inserted in the sections.
*/
-mutex_lock(&kprobe_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry(p, head, hlist)
@@ -2667,7 +2650,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
}
if (val == MODULE_STATE_GOING)
remove_module_kprobe_blacklist(mod);
-mutex_unlock(&kprobe_mutex);
return NOTIFY_DONE;
}
@@ -2695,7 +2677,7 @@ void kprobe_free_init_mem(void)
struct kprobe *p;
int i;
-mutex_lock(&kprobe_mutex);
+guard(mutex)(&kprobe_mutex);
/* Kill all kprobes on initmem because the target code has been freed. */
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
@@ -2705,8 +2687,6 @@ void kprobe_free_init_mem(void)
kill_kprobe(p);
}
}
-mutex_unlock(&kprobe_mutex);
}
static int __init init_kprobes(void)
@@ -2902,11 +2882,11 @@ static int arm_all_kprobes(void)
unsigned int i, total = 0, errors = 0;
int err, ret = 0;
-mutex_lock(&kprobe_mutex);
+guard(mutex)(&kprobe_mutex);
/* If kprobes are armed, just return */
if (!kprobes_all_disarmed)
-goto already_enabled;
+return 0;
/*
* optimize_kprobe() called by arm_kprobe() checks
@@ -2936,8 +2916,6 @@
else
pr_info("Kprobes globally enabled\n");
-already_enabled:
-mutex_unlock(&kprobe_mutex);
return ret;
}
@@ -2948,13 +2926,11 @@
unsigned int i, total = 0, errors = 0;
int err, ret = 0;
-mutex_lock(&kprobe_mutex);
+guard(mutex)(&kprobe_mutex);
/* If kprobes are already disarmed, just return */
-if (kprobes_all_disarmed) {
-mutex_unlock(&kprobe_mutex);
+if (kprobes_all_disarmed)
return 0;
-}
kprobes_all_disarmed = true;
@@ -2979,11 +2955,8 @@
else
pr_info("Kprobes globally disabled\n");
-mutex_unlock(&kprobe_mutex);
/* Wait for disarming all kprobes by optimizer */
-wait_for_kprobe_optimizer();
+wait_for_kprobe_optimizer_locked();
return ret;
}

kernel/trace/trace_dynevent.c

@@ -63,9 +63,8 @@ int dyn_event_register(struct dyn_event_operations *ops)
return -EINVAL;
INIT_LIST_HEAD(&ops->list);
-mutex_lock(&dyn_event_ops_mutex);
+guard(mutex)(&dyn_event_ops_mutex);
list_add_tail(&ops->list, &dyn_event_ops_list);
-mutex_unlock(&dyn_event_ops_mutex);
return 0;
}
@@ -106,20 +105,20 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
goto out;
}
-mutex_lock(&event_mutex);
+scoped_guard(mutex, &event_mutex) {
for_each_dyn_event_safe(pos, n) {
if (type && type != pos->ops)
continue;
if (!pos->ops->match(system, event,
argc - 1, (const char **)argv + 1, pos))
continue;
ret = pos->ops->free(pos);
if (ret)
break;
+}
+tracing_reset_all_online_cpus();
}
-tracing_reset_all_online_cpus();
-mutex_unlock(&event_mutex);
out:
argv_free(argv);
return ret;
@@ -133,13 +132,12 @@ static int create_dyn_event(const char *raw_command)
if (raw_command[0] == '-' || raw_command[0] == '!')
return dyn_event_release(raw_command, NULL);
-mutex_lock(&dyn_event_ops_mutex);
+guard(mutex)(&dyn_event_ops_mutex);
list_for_each_entry(ops, &dyn_event_ops_list, list) {
ret = ops->create(raw_command);
if (!ret || ret != -ECANCELED)
break;
}
-mutex_unlock(&dyn_event_ops_mutex);
if (ret == -ECANCELED)
ret = -EINVAL;
@@ -198,7 +196,7 @@ int dyn_events_release_all(struct dyn_event_operations *type)
struct dyn_event *ev, *tmp;
int ret = 0;
-mutex_lock(&event_mutex);
+guard(mutex)(&event_mutex);
for_each_dyn_event(ev) {
if (type && ev->ops != type)
continue;
@@ -216,7 +214,6 @@ int dyn_events_release_all(struct dyn_event_operations *type)
}
out:
tracing_reset_all_online_cpus();
-mutex_unlock(&event_mutex);
return ret;
}

kernel/trace/trace_eprobe.c

@@ -917,10 +917,10 @@ static int __trace_eprobe_create(int argc, const char *argv[])
goto error;
}
-mutex_lock(&event_mutex);
+scoped_guard(mutex, &event_mutex) {
event_call = find_and_get_event(sys_name, sys_event);
ep = alloc_event_probe(group, event, event_call, argc - 2);
-mutex_unlock(&event_mutex);
+}
if (IS_ERR(ep)) {
ret = PTR_ERR(ep);
@@ -952,23 +952,21 @@ static int __trace_eprobe_create(int argc, const char *argv[])
if (ret < 0)
goto error;
init_trace_eprobe_call(ep);
-mutex_lock(&event_mutex);
+scoped_guard(mutex, &event_mutex) {
ret = trace_probe_register_event_call(&ep->tp);
if (ret) {
if (ret == -EEXIST) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, EVENT_EXIST);
+}
+goto error;
+}
+ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
+if (ret < 0) {
+trace_probe_unregister_event_call(&ep->tp);
+goto error;
}
-mutex_unlock(&event_mutex);
-goto error;
}
-ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
-if (ret < 0) {
-trace_probe_unregister_event_call(&ep->tp);
-mutex_unlock(&event_mutex);
-goto error;
-}
-mutex_unlock(&event_mutex);
return ret;
parse_error:
ret = -EINVAL;

kernel/trace/trace_kprobe.c

@@ -634,7 +634,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
struct trace_kprobe *old_tk;
int ret;
-mutex_lock(&event_mutex);
+guard(mutex)(&event_mutex);
old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
trace_probe_group_name(&tk->tp));
@@ -642,11 +642,9 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, DIFF_PROBE_TYPE);
-ret = -EEXIST;
-} else {
-ret = append_trace_kprobe(tk, old_tk);
+return -EEXIST;
}
-goto end;
+return append_trace_kprobe(tk, old_tk);
}
/* Register new event */
@@ -657,7 +655,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
trace_probe_log_err(0, EVENT_EXIST);
} else
pr_warn("Failed to register probe event(%d)\n", ret);
-goto end;
+return ret;
}
/* Register k*probe */
@@ -672,8 +670,6 @@
else
dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
-end:
-mutex_unlock(&event_mutex);
return ret;
}
@@ -706,7 +702,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
return NOTIFY_DONE;
/* Update probes on coming module */
-mutex_lock(&event_mutex);
+guard(mutex)(&event_mutex);
for_each_trace_kprobe(tk, pos) {
if (trace_kprobe_within_module(tk, mod)) {
/* Don't need to check busy - this should have gone. */
@@ -718,7 +714,6 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
module_name(mod), ret);
}
}
-mutex_unlock(&event_mutex);
return NOTIFY_DONE;
}
@@ -1968,13 +1963,12 @@ static __init void enable_boot_kprobe_events(void)
struct trace_kprobe *tk;
struct dyn_event *pos;
-mutex_lock(&event_mutex);
+guard(mutex)(&event_mutex);
for_each_trace_kprobe(tk, pos) {
list_for_each_entry(file, &tr->events, list)
if (file->event_call == trace_probe_event_call(&tk->tp))
trace_event_enable_disable(file, 1, 0);
}
-mutex_unlock(&event_mutex);
}
static __init void setup_boot_kprobe_events(void)

kernel/trace/trace_uprobe.c

@@ -498,11 +498,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
struct trace_uprobe *old_tu;
int ret;
-mutex_lock(&event_mutex);
+guard(mutex)(&event_mutex);
ret = validate_ref_ctr_offset(tu);
if (ret)
-goto end;
+return ret;
/* register as an event */
old_tu = find_probe_event(trace_probe_name(&tu->tp),
@@ -511,11 +511,9 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, DIFF_PROBE_TYPE);
-ret = -EEXIST;
-} else {
-ret = append_trace_uprobe(tu, old_tu);
+return -EEXIST;
}
-goto end;
+return append_trace_uprobe(tu, old_tu);
}
ret = register_uprobe_event(tu);
@@ -525,14 +523,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
trace_probe_log_err(0, EVENT_EXIST);
} else
pr_warn("Failed to register probe event(%d)\n", ret);
-goto end;
+return ret;
}
dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
-end:
-mutex_unlock(&event_mutex);
return ret;
}