Merge probes/for-next
commit 154bf79648
include/linux/jump_label.h

@@ -75,6 +75,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/cleanup.h>
extern bool static_key_initialized;

@@ -347,6 +348,8 @@ static inline void static_key_disable(struct static_key *key)
#endif /* CONFIG_JUMP_LABEL */
DEFINE_LOCK_GUARD_0(jump_label_lock, jump_label_lock(), jump_label_unlock())
#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled
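The header change above pulls in <linux/cleanup.h> and declares a `jump_label_lock` guard class; the kernel/kprobes.c hunks below then replace explicit lock/unlock pairs and goto-based error paths with `guard()`/`scoped_guard()`. As a rough illustration of the underlying idea, here is a minimal user-space sketch built on GCC/Clang's `__attribute__((cleanup))`; the `GUARD` macro, `demo_lock`, `demo_unlock`, and `guarded_work` are invented names for this analogy, not the kernel's actual macros or API.

```c
/* Hypothetical user-space analogy of guard(mutex)(&lock); not kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Runs automatically when the guarded variable goes out of scope. */
static void demo_unlock(pthread_mutex_t **lockp)
{
	pthread_mutex_unlock(*lockp);
}

/* Take the lock now and arrange for the unlock at end of scope. */
#define GUARD(lock)							\
	pthread_mutex_t *__guard __attribute__((cleanup(demo_unlock))) = \
		(pthread_mutex_lock(lock), (lock))

static int guarded_work(int fail)
{
	GUARD(&demo_lock);

	if (fail)
		return -1;	/* early return: unlock still runs */

	puts("did work under demo_lock");
	return 0;		/* normal return: unlock still runs */
}

int main(void)
{
	guarded_work(0);
	guarded_work(1);
	return 0;
}
```

This is why functions touched by the diff, such as __get_insn_slot() and try_to_optimize_kprobe(), can simply return on error instead of jumping to an unlock label.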
kernel/kprobes.c
@@ -39,6 +39,7 @@
#include <linux/static_call.h>
#include <linux/perf_event.h>
#include <linux/execmem.h>
#include <linux/cleanup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>

@@ -140,45 +141,39 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c);
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
struct kprobe_insn_page *kip;
kprobe_opcode_t *slot = NULL;
/* Since the slot array is not protected by rcu, we need a mutex */
mutex_lock(&c->mutex);
retry:
rcu_read_lock();
list_for_each_entry_rcu(kip, &c->pages, list) {
if (kip->nused < slots_per_page(c)) {
int i;
guard(mutex)(&c->mutex);
do {
guard(rcu)();
list_for_each_entry_rcu(kip, &c->pages, list) {
if (kip->nused < slots_per_page(c)) {
int i;
for (i = 0; i < slots_per_page(c); i++) {
if (kip->slot_used[i] == SLOT_CLEAN) {
kip->slot_used[i] = SLOT_USED;
kip->nused++;
slot = kip->insns + (i * c->insn_size);
rcu_read_unlock();
goto out;
for (i = 0; i < slots_per_page(c); i++) {
if (kip->slot_used[i] == SLOT_CLEAN) {
kip->slot_used[i] = SLOT_USED;
kip->nused++;
return kip->insns + (i * c->insn_size);
}
}
/* kip->nused is broken. Fix it. */
kip->nused = slots_per_page(c);
WARN_ON(1);
}
/* kip->nused is broken. Fix it. */
kip->nused = slots_per_page(c);
WARN_ON(1);
}
}
rcu_read_unlock();
/* If there are any garbage slots, collect it and try again. */
if (c->nr_garbage && collect_garbage_slots(c) == 0)
goto retry;
} while (c->nr_garbage && collect_garbage_slots(c) == 0);
/* All out of space. Need to allocate a new page. */
kip = kmalloc(struct_size(kip, slot_used, slots_per_page(c)), GFP_KERNEL);
if (!kip)
goto out;
return NULL;
kip->insns = c->alloc();
if (!kip->insns) {
kfree(kip);
goto out;
return NULL;
}
INIT_LIST_HEAD(&kip->list);
memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));

@@ -187,14 +182,12 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
kip->ngarbage = 0;
kip->cache = c;
list_add_rcu(&kip->list, &c->pages);
slot = kip->insns;
/* Record the perf ksymbol register event after adding the page */
perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
PAGE_SIZE, false, c->sym);
out:
mutex_unlock(&c->mutex);
return slot;
return kip->insns;
}

/* Return true if all garbages are collected, otherwise false. */
@@ -249,25 +242,35 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c)
return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
kprobe_opcode_t *slot, int dirty)
static long __find_insn_page(struct kprobe_insn_cache *c,
kprobe_opcode_t *slot, struct kprobe_insn_page **pkip)
{
struct kprobe_insn_page *kip;
struct kprobe_insn_page *kip = NULL;
long idx;
mutex_lock(&c->mutex);
rcu_read_lock();
guard(rcu)();
list_for_each_entry_rcu(kip, &c->pages, list) {
idx = ((long)slot - (long)kip->insns) /
(c->insn_size * sizeof(kprobe_opcode_t));
if (idx >= 0 && idx < slots_per_page(c))
goto out;
if (idx >= 0 && idx < slots_per_page(c)) {
*pkip = kip;
return idx;
}
}
/* Could not find this slot. */
WARN_ON(1);
kip = NULL;
out:
rcu_read_unlock();
*pkip = NULL;
return -1;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
kprobe_opcode_t *slot, int dirty)
{
struct kprobe_insn_page *kip = NULL;
long idx;
guard(mutex)(&c->mutex);
idx = __find_insn_page(c, slot, &kip);
/* Mark and sweep: this may sleep */
if (kip) {
/* Check double free */

@@ -281,7 +284,6 @@ out:
collect_one_slot(kip, idx);
}
}
mutex_unlock(&c->mutex);
}

/*
@@ -600,47 +602,43 @@ static void kick_kprobe_optimizer(void)
/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
mutex_lock(&kprobe_mutex);
cpus_read_lock();
mutex_lock(&text_mutex);
guard(mutex)(&kprobe_mutex);
/*
* Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
* kprobes before waiting for quiesence period.
*/
do_unoptimize_kprobes();
scoped_guard(cpus_read_lock) {
guard(mutex)(&text_mutex);
/*
* Step 2: Wait for quiesence period to ensure all potentially
* preempted tasks to have normally scheduled. Because optprobe
* may modify multiple instructions, there is a chance that Nth
* instruction is preempted. In that case, such tasks can return
* to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
* Note that on non-preemptive kernel, this is transparently converted
* to synchronoze_sched() to wait for all interrupts to have completed.
*/
synchronize_rcu_tasks();
/*
* Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
* kprobes before waiting for quiesence period.
*/
do_unoptimize_kprobes();
/* Step 3: Optimize kprobes after quiesence period */
do_optimize_kprobes();
/*
* Step 2: Wait for quiesence period to ensure all potentially
* preempted tasks to have normally scheduled. Because optprobe
* may modify multiple instructions, there is a chance that Nth
* instruction is preempted. In that case, such tasks can return
* to 2nd-Nth byte of jump instruction. This wait is for avoiding it.
* Note that on non-preemptive kernel, this is transparently converted
* to synchronoze_sched() to wait for all interrupts to have completed.
*/
synchronize_rcu_tasks();
/* Step 4: Free cleaned kprobes after quiesence period */
do_free_cleaned_kprobes();
/* Step 3: Optimize kprobes after quiesence period */
do_optimize_kprobes();
mutex_unlock(&text_mutex);
cpus_read_unlock();
/* Step 4: Free cleaned kprobes after quiesence period */
do_free_cleaned_kprobes();
}
/* Step 5: Kick optimizer again if needed */
if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
kick_kprobe_optimizer();
mutex_unlock(&kprobe_mutex);
}

/* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
static void wait_for_kprobe_optimizer_locked(void)
{
mutex_lock(&kprobe_mutex);
lockdep_assert_held(&kprobe_mutex);
while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
mutex_unlock(&kprobe_mutex);

@@ -652,8 +650,14 @@ void wait_for_kprobe_optimizer(void)
mutex_lock(&kprobe_mutex);
}
}
mutex_unlock(&kprobe_mutex);
/* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
{
guard(mutex)(&kprobe_mutex);
wait_for_kprobe_optimizer_locked();
}

bool optprobe_queued_unopt(struct optimized_kprobe *op)
@@ -852,29 +856,24 @@ static void try_to_optimize_kprobe(struct kprobe *p)
return;
/* For preparing optimization, jump_label_text_reserved() is called. */
cpus_read_lock();
jump_label_lock();
mutex_lock(&text_mutex);
guard(cpus_read_lock)();
guard(jump_label_lock)();
guard(mutex)(&text_mutex);
ap = alloc_aggr_kprobe(p);
if (!ap)
goto out;
return;
op = container_of(ap, struct optimized_kprobe, kp);
if (!arch_prepared_optinsn(&op->optinsn)) {
/* If failed to setup optimizing, fallback to kprobe. */
arch_remove_optimized_kprobe(op);
kfree(op);
goto out;
return;
}
init_aggr_kprobe(ap, p);
optimize_kprobe(ap); /* This just kicks optimizer thread. */
out:
mutex_unlock(&text_mutex);
jump_label_unlock();
cpus_read_unlock();
}

static void optimize_all_kprobes(void)

@@ -883,10 +882,10 @@ static void optimize_all_kprobes(void)
struct kprobe *p;
unsigned int i;
mutex_lock(&kprobe_mutex);
guard(mutex)(&kprobe_mutex);
/* If optimization is already allowed, just return. */
if (kprobes_allow_optimization)
goto out;
return;
cpus_read_lock();
kprobes_allow_optimization = true;

@@ -898,8 +897,6 @@ static void optimize_all_kprobes(void)
}
cpus_read_unlock();
pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n");
out:
mutex_unlock(&kprobe_mutex);
}

#ifdef CONFIG_SYSCTL

@@ -909,12 +906,10 @@ static void unoptimize_all_kprobes(void)
struct kprobe *p;
unsigned int i;
mutex_lock(&kprobe_mutex);
guard(mutex)(&kprobe_mutex);
/* If optimization is already prohibited, just return. */
if (!kprobes_allow_optimization) {
mutex_unlock(&kprobe_mutex);
if (!kprobes_allow_optimization)
return;
}
cpus_read_lock();
kprobes_allow_optimization = false;

@@ -926,10 +921,8 @@ static void unoptimize_all_kprobes(void)
}
}
cpus_read_unlock();
mutex_unlock(&kprobe_mutex);
/* Wait for unoptimizing completion. */
wait_for_kprobe_optimizer();
wait_for_kprobe_optimizer_locked();
pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n");
}

@@ -941,7 +934,7 @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table,
{
int ret;
mutex_lock(&kprobe_sysctl_mutex);
guard(mutex)(&kprobe_sysctl_mutex);
sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

@@ -949,7 +942,6 @@ static int proc_kprobes_optimization_handler(const struct ctl_table *table,
optimize_all_kprobes();
else
unoptimize_all_kprobes();
mutex_unlock(&kprobe_sysctl_mutex);
return ret;
}
@@ -1024,7 +1016,8 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
#define __arm_kprobe(p) arch_arm_kprobe(p)
#define __disarm_kprobe(p, o) arch_disarm_kprobe(p)
#define kprobe_disarmed(p) kprobe_disabled(p)
#define wait_for_kprobe_optimizer() do {} while (0)
#define wait_for_kprobe_optimizer_locked() \
	lockdep_assert_held(&kprobe_mutex)

static int reuse_unused_kprobe(struct kprobe *ap)
{

@@ -1078,20 +1071,18 @@ static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
if (*cnt == 0) {
ret = register_ftrace_function(ops);
if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret))
goto err_ftrace;
if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) {
/*
* At this point, sinec ops is not registered, we should be sefe from
* registering empty filter.
*/
ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
return ret;
}
}
(*cnt)++;
return ret;
err_ftrace:
/*
* At this point, sinec ops is not registered, we should be sefe from
* registering empty filter.
*/
ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
return ret;
}

static int arm_kprobe_ftrace(struct kprobe *p)

@@ -1163,12 +1154,9 @@ static int arm_kprobe(struct kprobe *kp)
if (unlikely(kprobe_ftrace(kp)))
return arm_kprobe_ftrace(kp);
cpus_read_lock();
mutex_lock(&text_mutex);
guard(cpus_read_lock)();
guard(mutex)(&text_mutex);
__arm_kprobe(kp);
mutex_unlock(&text_mutex);
cpus_read_unlock();
return 0;
}

@@ -1177,12 +1165,9 @@ static int disarm_kprobe(struct kprobe *kp, bool reopt)
if (unlikely(kprobe_ftrace(kp)))
return disarm_kprobe_ftrace(kp);
cpus_read_lock();
mutex_lock(&text_mutex);
guard(cpus_read_lock)();
guard(mutex)(&text_mutex);
__disarm_kprobe(kp, reopt);
mutex_unlock(&text_mutex);
cpus_read_unlock();
return 0;
}
@@ -1299,63 +1284,56 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
int ret = 0;
struct kprobe *ap = orig_p;
cpus_read_lock();
scoped_guard(cpus_read_lock) {
/* For preparing optimization, jump_label_text_reserved() is called */
guard(jump_label_lock)();
guard(mutex)(&text_mutex);
/* For preparing optimization, jump_label_text_reserved() is called */
jump_label_lock();
mutex_lock(&text_mutex);
if (!kprobe_aggrprobe(orig_p)) {
/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
ap = alloc_aggr_kprobe(orig_p);
if (!ap) {
ret = -ENOMEM;
goto out;
if (!kprobe_aggrprobe(orig_p)) {
/* If 'orig_p' is not an 'aggr_kprobe', create new one. */
ap = alloc_aggr_kprobe(orig_p);
if (!ap)
return -ENOMEM;
init_aggr_kprobe(ap, orig_p);
} else if (kprobe_unused(ap)) {
/* This probe is going to die. Rescue it */
ret = reuse_unused_kprobe(ap);
if (ret)
return ret;
}
init_aggr_kprobe(ap, orig_p);
} else if (kprobe_unused(ap)) {
/* This probe is going to die. Rescue it */
ret = reuse_unused_kprobe(ap);
if (ret)
goto out;
}
if (kprobe_gone(ap)) {
/*
* Attempting to insert new probe at the same location that
* had a probe in the module vaddr area which already
* freed. So, the instruction slot has already been
* released. We need a new slot for the new probe.
*/
ret = arch_prepare_kprobe(ap);
if (ret)
if (kprobe_gone(ap)) {
/*
* Even if fail to allocate new slot, don't need to
* free the 'ap'. It will be used next time, or
* freed by unregister_kprobe().
* Attempting to insert new probe at the same location that
* had a probe in the module vaddr area which already
* freed. So, the instruction slot has already been
* released. We need a new slot for the new probe.
*/
goto out;
ret = arch_prepare_kprobe(ap);
if (ret)
/*
* Even if fail to allocate new slot, don't need to
* free the 'ap'. It will be used next time, or
* freed by unregister_kprobe().
*/
return ret;
/* Prepare optimized instructions if possible. */
prepare_optimized_kprobe(ap);
/* Prepare optimized instructions if possible. */
prepare_optimized_kprobe(ap);
/*
* Clear gone flag to prevent allocating new slot again, and
* set disabled flag because it is not armed yet.
*/
ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
| KPROBE_FLAG_DISABLED;
/*
* Clear gone flag to prevent allocating new slot again, and
* set disabled flag because it is not armed yet.
*/
ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
| KPROBE_FLAG_DISABLED;
}
/* Copy the insn slot of 'p' to 'ap'. */
copy_kprobe(ap, p);
ret = add_new_kprobe(ap, p);
}
/* Copy the insn slot of 'p' to 'ap'. */
copy_kprobe(ap, p);
ret = add_new_kprobe(ap, p);
out:
mutex_unlock(&text_mutex);
jump_label_unlock();
cpus_read_unlock();
if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
ap->flags &= ~KPROBE_FLAG_DISABLED;
if (!kprobes_all_disarmed) {
@@ -1448,7 +1426,7 @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
unsigned long offset, bool *on_func_entry)
{
if ((symbol_name && addr) || (!symbol_name && !addr))
goto invalid;
return ERR_PTR(-EINVAL);
if (symbol_name) {
/*

@@ -1478,16 +1456,16 @@ _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name,
* at the start of the function.
*/
addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry);
if (addr)
return addr;
if (!addr)
return ERR_PTR(-EINVAL);
invalid:
return ERR_PTR(-EINVAL);
return addr;
}

static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
bool on_func_entry;
return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
}

@@ -1505,15 +1483,15 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p)
if (unlikely(!ap))
return NULL;
if (p != ap) {
list_for_each_entry(list_p, &ap->list, list)
if (list_p == p)
/* kprobe p is a valid probe */
goto valid;
return NULL;
}
valid:
return ap;
if (p == ap)
return ap;
list_for_each_entry(list_p, &ap->list, list)
if (list_p == p)
/* kprobe p is a valid probe */
return ap;
return NULL;
}

/*

@@ -1522,14 +1500,12 @@ valid:
*/
static inline int warn_kprobe_rereg(struct kprobe *p)
{
int ret = 0;
guard(mutex)(&kprobe_mutex);
mutex_lock(&kprobe_mutex);
if (WARN_ON_ONCE(__get_valid_kprobe(p)))
ret = -EINVAL;
mutex_unlock(&kprobe_mutex);
return -EINVAL;
return ret;
return 0;
}

static int check_ftrace_location(struct kprobe *p)

@@ -1565,17 +1541,23 @@ static int check_kprobe_address_safe(struct kprobe *p,
ret = check_ftrace_location(p);
if (ret)
return ret;
jump_label_lock();
preempt_disable();
guard(jump_label_lock)();
/* Ensure the address is in a text area, and find a module if exists. */
*probed_mod = NULL;
if (!core_kernel_text((unsigned long) p->addr)) {
guard(preempt)();
*probed_mod = __module_text_address((unsigned long) p->addr);
if (!(*probed_mod)) {
ret = -EINVAL;
goto out;
}
if (!(*probed_mod))
return -EINVAL;
/*
* We must hold a refcount of the probed module while updating
* its code to prohibit unexpected unloading.
*/
if (unlikely(!try_module_get(*probed_mod)))
return -ENOENT;
}
/* Ensure it is not in reserved area. */
if (in_gate_area_no_mm((unsigned long) p->addr) ||

@@ -1584,21 +1566,12 @@ static int check_kprobe_address_safe(struct kprobe *p,
static_call_text_reserved(p->addr, p->addr) ||
find_bug((unsigned long)p->addr) ||
is_cfi_preamble_symbol((unsigned long)p->addr)) {
ret = -EINVAL;
goto out;
module_put(*probed_mod);
return -EINVAL;
}
/* Get module refcount and reject __init functions for loaded modules. */
if (IS_ENABLED(CONFIG_MODULES) && *probed_mod) {
/*
* We must hold a refcount of the probed module while updating
* its code to prohibit unexpected unloading.
*/
if (unlikely(!try_module_get(*probed_mod))) {
ret = -ENOENT;
goto out;
}
/*
* If the module freed '.init.text', we couldn't insert
* kprobes in there.
@@ -1606,27 +1579,58 @@ static int check_kprobe_address_safe(struct kprobe *p,
if (within_module_init((unsigned long)p->addr, *probed_mod) &&
!module_is_coming(*probed_mod)) {
module_put(*probed_mod);
*probed_mod = NULL;
ret = -ENOENT;
return -ENOENT;
}
}
out:
preempt_enable();
jump_label_unlock();
return 0;
}
return ret;
static int __register_kprobe(struct kprobe *p)
{
int ret;
struct kprobe *old_p;
guard(mutex)(&kprobe_mutex);
old_p = get_kprobe(p->addr);
if (old_p)
/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
return register_aggr_kprobe(old_p, p);
scoped_guard(cpus_read_lock) {
/* Prevent text modification */
guard(mutex)(&text_mutex);
ret = prepare_kprobe(p);
if (ret)
return ret;
}
INIT_HLIST_NODE(&p->hlist);
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
ret = arm_kprobe(p);
if (ret) {
hlist_del_rcu(&p->hlist);
synchronize_rcu();
}
}
/* Try to optimize kprobe */
try_to_optimize_kprobe(p);
return 0;
}

int register_kprobe(struct kprobe *p)
{
int ret;
struct kprobe *old_p;
struct module *probed_mod;
kprobe_opcode_t *addr;
bool on_func_entry;
/* Adjust probe address from symbol */
/* Canonicalize probe address from symbol */
addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
if (IS_ERR(addr))
return PTR_ERR(addr);

@@ -1638,6 +1642,8 @@ int register_kprobe(struct kprobe *p)
/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
p->flags &= KPROBE_FLAG_DISABLED;
if (on_func_entry)
p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
p->nmissed = 0;
INIT_LIST_HEAD(&p->list);

@@ -1645,44 +1651,7 @@ int register_kprobe(struct kprobe *p)
if (ret)
return ret;
mutex_lock(&kprobe_mutex);
if (on_func_entry)
p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
old_p = get_kprobe(p->addr);
if (old_p) {
/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
ret = register_aggr_kprobe(old_p, p);
goto out;
}
cpus_read_lock();
/* Prevent text modification */
mutex_lock(&text_mutex);
ret = prepare_kprobe(p);
mutex_unlock(&text_mutex);
cpus_read_unlock();
if (ret)
goto out;
INIT_HLIST_NODE(&p->hlist);
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
ret = arm_kprobe(p);
if (ret) {
hlist_del_rcu(&p->hlist);
synchronize_rcu();
goto out;
}
}
/* Try to optimize kprobe */
try_to_optimize_kprobe(p);
out:
mutex_unlock(&kprobe_mutex);
ret = __register_kprobe(p);
if (probed_mod)
module_put(probed_mod);
@@ -1761,29 +1730,31 @@ static int __unregister_kprobe_top(struct kprobe *p)
if (IS_ERR(ap))
return PTR_ERR(ap);
if (ap == p)
/*
* This probe is an independent(and non-optimized) kprobe
* (not an aggrprobe). Remove from the hash list.
*/
goto disarmed;
WARN_ON(ap != p && !kprobe_aggrprobe(ap));
/* Following process expects this probe is an aggrprobe */
WARN_ON(!kprobe_aggrprobe(ap));
if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
/*
* If the probe is an independent(and non-optimized) kprobe
* (not an aggrprobe), the last kprobe on the aggrprobe, or
* kprobe is already disarmed, just remove from the hash list.
*/
if (ap == p ||
(list_is_singular(&ap->list) && kprobe_disarmed(ap))) {
/*
* !disarmed could be happen if the probe is under delayed
* unoptimizing.
*/
goto disarmed;
else {
/* If disabling probe has special handlers, update aggrprobe */
if (p->post_handler && !kprobe_gone(p)) {
list_for_each_entry(list_p, &ap->list, list) {
if ((list_p != p) && (list_p->post_handler))
goto noclean;
}
hlist_del_rcu(&ap->hlist);
return 0;
}
/* If disabling probe has special handlers, update aggrprobe */
if (p->post_handler && !kprobe_gone(p)) {
list_for_each_entry(list_p, &ap->list, list) {
if ((list_p != p) && (list_p->post_handler))
break;
}
/* No other probe has post_handler */
if (list_entry_is_head(list_p, &ap->list, list)) {
/*
* For the kprobe-on-ftrace case, we keep the
* post_handler setting to identify this aggrprobe

@@ -1792,24 +1763,21 @@ static int __unregister_kprobe_top(struct kprobe *p)
if (!kprobe_ftrace(ap))
ap->post_handler = NULL;
}
noclean:
/*
* Remove from the aggrprobe: this path will do nothing in
* __unregister_kprobe_bottom().
*/
list_del_rcu(&p->list);
if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
/*
* Try to optimize this probe again, because post
* handler may have been changed.
*/
optimize_kprobe(ap);
}
/*
* Remove from the aggrprobe: this path will do nothing in
* __unregister_kprobe_bottom().
*/
list_del_rcu(&p->list);
if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
/*
* Try to optimize this probe again, because post
* handler may have been changed.
*/
optimize_kprobe(ap);
return 0;
disarmed:
hlist_del_rcu(&ap->hlist);
return 0;
}

static void __unregister_kprobe_bottom(struct kprobe *p)
@@ -1858,12 +1826,11 @@ void unregister_kprobes(struct kprobe **kps, int num)
if (num <= 0)
return;
mutex_lock(&kprobe_mutex);
for (i = 0; i < num; i++)
if (__unregister_kprobe_top(kps[i]) < 0)
kps[i]->addr = NULL;
mutex_unlock(&kprobe_mutex);
scoped_guard(mutex, &kprobe_mutex) {
for (i = 0; i < num; i++)
if (__unregister_kprobe_top(kps[i]) < 0)
kps[i]->addr = NULL;
}
synchronize_rcu();
for (i = 0; i < num; i++)
if (kps[i]->addr)

@@ -2302,8 +2269,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
if (num <= 0)
return;
mutex_lock(&kprobe_mutex);
for (i = 0; i < num; i++) {
guard(mutex)(&kprobe_mutex);
if (__unregister_kprobe_top(&rps[i]->kp) < 0)
rps[i]->kp.addr = NULL;
#ifdef CONFIG_KRETPROBE_ON_RETHOOK

@@ -2312,7 +2280,6 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
rcu_assign_pointer(rps[i]->rph->rp, NULL);
#endif
}
mutex_unlock(&kprobe_mutex);
synchronize_rcu();
for (i = 0; i < num; i++) {
@@ -2393,18 +2360,14 @@ static void kill_kprobe(struct kprobe *p)
/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
int ret = 0;
struct kprobe *p;
mutex_lock(&kprobe_mutex);
guard(mutex)(&kprobe_mutex);
/* Disable this kprobe */
p = __disable_kprobe(kp);
if (IS_ERR(p))
ret = PTR_ERR(p);
mutex_unlock(&kprobe_mutex);
return ret;
return IS_ERR(p) ? PTR_ERR(p) : 0;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

@@ -2414,20 +2377,16 @@ int enable_kprobe(struct kprobe *kp)
int ret = 0;
struct kprobe *p;
mutex_lock(&kprobe_mutex);
guard(mutex)(&kprobe_mutex);
/* Check whether specified probe is valid. */
p = __get_valid_kprobe(kp);
if (unlikely(p == NULL)) {
ret = -EINVAL;
goto out;
}
if (unlikely(p == NULL))
return -EINVAL;
if (kprobe_gone(kp)) {
if (kprobe_gone(kp))
/* This kprobe has gone, we couldn't enable it. */
ret = -EINVAL;
goto out;
}
return -EINVAL;
if (p != kp)
kp->flags &= ~KPROBE_FLAG_DISABLED;

@@ -2441,8 +2400,6 @@ int enable_kprobe(struct kprobe *kp)
kp->flags |= KPROBE_FLAG_DISABLED;
}
}
out:
mutex_unlock(&kprobe_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
@@ -2630,11 +2587,11 @@ static int kprobes_module_callback(struct notifier_block *nb,
unsigned int i;
int checkcore = (val == MODULE_STATE_GOING);
if (val == MODULE_STATE_COMING) {
mutex_lock(&kprobe_mutex);
guard(mutex)(&kprobe_mutex);
if (val == MODULE_STATE_COMING)
add_module_kprobe_blacklist(mod);
mutex_unlock(&kprobe_mutex);
}
if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
return NOTIFY_DONE;

@@ -2644,7 +2601,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
* notified, only '.init.text' section would be freed. We need to
* disable kprobes which have been inserted in the sections.
*/
mutex_lock(&kprobe_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry(p, head, hlist)

@@ -2667,7 +2623,6 @@ static int kprobes_module_callback(struct notifier_block *nb,
}
if (val == MODULE_STATE_GOING)
remove_module_kprobe_blacklist(mod);
mutex_unlock(&kprobe_mutex);
return NOTIFY_DONE;
}

@@ -2695,7 +2650,7 @@ void kprobe_free_init_mem(void)
struct kprobe *p;
int i;
mutex_lock(&kprobe_mutex);
guard(mutex)(&kprobe_mutex);
/* Kill all kprobes on initmem because the target code has been freed. */
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {

@@ -2705,8 +2660,6 @@ void kprobe_free_init_mem(void)
kill_kprobe(p);
}
}
mutex_unlock(&kprobe_mutex);
}

static int __init init_kprobes(void)
@@ -2902,11 +2855,11 @@ static int arm_all_kprobes(void)
unsigned int i, total = 0, errors = 0;
int err, ret = 0;
mutex_lock(&kprobe_mutex);
guard(mutex)(&kprobe_mutex);
/* If kprobes are armed, just return */
if (!kprobes_all_disarmed)
goto already_enabled;
return 0;
/*
* optimize_kprobe() called by arm_kprobe() checks

@@ -2936,8 +2889,6 @@ static int arm_all_kprobes(void)
else
pr_info("Kprobes globally enabled\n");
already_enabled:
mutex_unlock(&kprobe_mutex);
return ret;
}

@@ -2948,13 +2899,11 @@ static int disarm_all_kprobes(void)
unsigned int i, total = 0, errors = 0;
int err, ret = 0;
mutex_lock(&kprobe_mutex);
guard(mutex)(&kprobe_mutex);
/* If kprobes are already disarmed, just return */
if (kprobes_all_disarmed) {
mutex_unlock(&kprobe_mutex);
if (kprobes_all_disarmed)
return 0;
}
kprobes_all_disarmed = true;

@@ -2979,11 +2928,8 @@ static int disarm_all_kprobes(void)
else
pr_info("Kprobes globally disabled\n");
mutex_unlock(&kprobe_mutex);
/* Wait for disarming all kprobes by optimizer */
wait_for_kprobe_optimizer();
wait_for_kprobe_optimizer_locked();
return ret;
}
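Several call sites above and below (unregister_kprobes(), __register_kprobe(), and the __trace_eprobe_create() change in the next file) use scoped_guard() so that only a narrow block runs under the lock, while slow work such as synchronize_rcu() happens after the lock is dropped. A minimal user-space sketch of the same scoping idea, again built on __attribute__((cleanup)); scope_unlock, list_lock, registered, and unregister_all are invented names for this analogy, not kernel or library API.

```c
/* Hypothetical user-space analogy of scoped_guard(mutex, &lock) { ... }. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int registered = 1;

/* Drops the lock when the guard variable leaves its enclosing block. */
static void scope_unlock(pthread_mutex_t **lockp)
{
	pthread_mutex_unlock(*lockp);
}

static void unregister_all(void)
{
	{	/* critical section: the lock is dropped at the closing brace */
		pthread_mutex_t *guard __attribute__((cleanup(scope_unlock))) =
			(pthread_mutex_lock(&list_lock), &list_lock);

		/* Only the bookkeeping update needs the lock. */
		registered = 0;
	}

	/* Slow work (like synchronize_rcu() in the diff) runs unlocked. */
	puts("lock already dropped here");
}

int main(void)
{
	unregister_all();
	return 0;
}
```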
kernel/trace/trace_eprobe.c

@@ -917,10 +917,10 @@ static int __trace_eprobe_create(int argc, const char *argv[])
goto error;
}
mutex_lock(&event_mutex);
event_call = find_and_get_event(sys_name, sys_event);
ep = alloc_event_probe(group, event, event_call, argc - 2);
mutex_unlock(&event_mutex);
scoped_guard(mutex, &event_mutex) {
event_call = find_and_get_event(sys_name, sys_event);
ep = alloc_event_probe(group, event, event_call, argc - 2);
}
if (IS_ERR(ep)) {
ret = PTR_ERR(ep);

@@ -952,23 +952,21 @@ static int __trace_eprobe_create(int argc, const char *argv[])
if (ret < 0)
goto error;
init_trace_eprobe_call(ep);
mutex_lock(&event_mutex);
ret = trace_probe_register_event_call(&ep->tp);
if (ret) {
if (ret == -EEXIST) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, EVENT_EXIST);
scoped_guard(mutex, &event_mutex) {
ret = trace_probe_register_event_call(&ep->tp);
if (ret) {
if (ret == -EEXIST) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, EVENT_EXIST);
}
goto error;
}
ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
if (ret < 0) {
trace_probe_unregister_event_call(&ep->tp);
goto error;
}
mutex_unlock(&event_mutex);
goto error;
}
ret = dyn_event_add(&ep->devent, &ep->tp.event->call);
if (ret < 0) {
trace_probe_unregister_event_call(&ep->tp);
mutex_unlock(&event_mutex);
goto error;
}
mutex_unlock(&event_mutex);
return ret;
parse_error:
ret = -EINVAL;
kernel/trace/trace_kprobe.c

@@ -634,7 +634,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
struct trace_kprobe *old_tk;
int ret;
mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);
old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
trace_probe_group_name(&tk->tp));

@@ -642,11 +642,9 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, DIFF_PROBE_TYPE);
ret = -EEXIST;
} else {
ret = append_trace_kprobe(tk, old_tk);
return -EEXIST;
}
goto end;
return append_trace_kprobe(tk, old_tk);
}
/* Register new event */

@@ -657,7 +655,7 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
trace_probe_log_err(0, EVENT_EXIST);
} else
pr_warn("Failed to register probe event(%d)\n", ret);
goto end;
return ret;
}
/* Register k*probe */

@@ -672,8 +670,6 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
else
dyn_event_add(&tk->devent, trace_probe_event_call(&tk->tp));
end:
mutex_unlock(&event_mutex);
return ret;
}

@@ -706,7 +702,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
return NOTIFY_DONE;
/* Update probes on coming module */
mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);
for_each_trace_kprobe(tk, pos) {
if (trace_kprobe_within_module(tk, mod)) {
/* Don't need to check busy - this should have gone. */

@@ -718,7 +714,6 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
module_name(mod), ret);
}
}
mutex_unlock(&event_mutex);
return NOTIFY_DONE;
}

@@ -1968,13 +1963,12 @@ static __init void enable_boot_kprobe_events(void)
struct trace_kprobe *tk;
struct dyn_event *pos;
mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);
for_each_trace_kprobe(tk, pos) {
list_for_each_entry(file, &tr->events, list)
if (file->event_call == trace_probe_event_call(&tk->tp))
trace_event_enable_disable(file, 1, 0);
}
mutex_unlock(&event_mutex);
}

static __init void setup_boot_kprobe_events(void)
kernel/trace/trace_uprobe.c

@@ -498,11 +498,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
struct trace_uprobe *old_tu;
int ret;
mutex_lock(&event_mutex);
guard(mutex)(&event_mutex);
ret = validate_ref_ctr_offset(tu);
if (ret)
goto end;
return ret;
/* register as an event */
old_tu = find_probe_event(trace_probe_name(&tu->tp),

@@ -511,11 +511,9 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
trace_probe_log_set_index(0);
trace_probe_log_err(0, DIFF_PROBE_TYPE);
ret = -EEXIST;
} else {
ret = append_trace_uprobe(tu, old_tu);
return -EEXIST;
}
goto end;
return append_trace_uprobe(tu, old_tu);
}
ret = register_uprobe_event(tu);

@@ -525,14 +523,11 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
trace_probe_log_err(0, EVENT_EXIST);
} else
pr_warn("Failed to register probe event(%d)\n", ret);
goto end;
return ret;
}
dyn_event_add(&tu->devent, trace_probe_event_call(&tu->tp));
end:
mutex_unlock(&event_mutex);
return ret;
}