From 87195a1ee332add27bd51448c6b54aad551a28f5 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Tue, 10 Sep 2024 10:43:12 -0700
Subject: [PATCH 01/21] uprobes: switch to RCU Tasks Trace flavor for better performance
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This patch switches uprobes SRCU usage to RCU Tasks Trace flavor, which is
optimized for more lightweight and quick readers (at the expense of slower
writers, which for uprobes is a fine tradeoff) and has better performance
and scalability with the number of CPUs.

Similarly to the baseline vs SRCU comparison, we've benchmarked the
SRCU-based implementation against the RCU Tasks Trace implementation.

SRCU
====
uprobe-nop      ( 1 cpus):    3.276 ± 0.005M/s  (  3.276M/s/cpu)
uprobe-nop      ( 2 cpus):    4.125 ± 0.002M/s  (  2.063M/s/cpu)
uprobe-nop      ( 4 cpus):    7.713 ± 0.002M/s  (  1.928M/s/cpu)
uprobe-nop      ( 8 cpus):    8.097 ± 0.006M/s  (  1.012M/s/cpu)
uprobe-nop      (16 cpus):    6.501 ± 0.056M/s  (  0.406M/s/cpu)
uprobe-nop      (32 cpus):    4.398 ± 0.084M/s  (  0.137M/s/cpu)
uprobe-nop      (64 cpus):    6.452 ± 0.000M/s  (  0.101M/s/cpu)
uretprobe-nop   ( 1 cpus):    2.055 ± 0.001M/s  (  2.055M/s/cpu)
uretprobe-nop   ( 2 cpus):    2.677 ± 0.000M/s  (  1.339M/s/cpu)
uretprobe-nop   ( 4 cpus):    4.561 ± 0.003M/s  (  1.140M/s/cpu)
uretprobe-nop   ( 8 cpus):    5.291 ± 0.002M/s  (  0.661M/s/cpu)
uretprobe-nop   (16 cpus):    5.065 ± 0.019M/s  (  0.317M/s/cpu)
uretprobe-nop   (32 cpus):    3.622 ± 0.003M/s  (  0.113M/s/cpu)
uretprobe-nop   (64 cpus):    3.723 ± 0.002M/s  (  0.058M/s/cpu)

RCU Tasks Trace
===============
uprobe-nop      ( 1 cpus):    3.396 ± 0.002M/s  (  3.396M/s/cpu)
uprobe-nop      ( 2 cpus):    4.271 ± 0.006M/s  (  2.135M/s/cpu)
uprobe-nop      ( 4 cpus):    8.499 ± 0.015M/s  (  2.125M/s/cpu)
uprobe-nop      ( 8 cpus):   10.355 ± 0.028M/s  (  1.294M/s/cpu)
uprobe-nop      (16 cpus):    7.615 ± 0.099M/s  (  0.476M/s/cpu)
uprobe-nop      (32 cpus):    4.430 ± 0.007M/s  (  0.138M/s/cpu)
uprobe-nop      (64 cpus):    6.887 ± 0.020M/s  (  0.108M/s/cpu)
uretprobe-nop   ( 1 cpus):    2.174 ± 0.001M/s  (  2.174M/s/cpu)
uretprobe-nop   ( 2 cpus):    2.853 ± 0.001M/s  (  1.426M/s/cpu)
uretprobe-nop   ( 4 cpus):    4.913 ± 0.002M/s  (  1.228M/s/cpu)
uretprobe-nop   ( 8 cpus):    5.883 ± 0.002M/s  (  0.735M/s/cpu)
uretprobe-nop   (16 cpus):    5.147 ± 0.001M/s  (  0.322M/s/cpu)
uretprobe-nop   (32 cpus):    3.738 ± 0.008M/s  (  0.117M/s/cpu)
uretprobe-nop   (64 cpus):    4.397 ± 0.002M/s  (  0.069M/s/cpu)

Peak throughput for uprobes increases from 8 mln/s to 10.3 mln/s (+28%!),
and for uretprobes from 5.3 mln/s to 5.8 mln/s (+11%), as we have more work
to do on the uretprobe side. Even single-thread (no contention) performance
is slightly better: 3.276 mln/s to 3.396 mln/s (+3.5%) for uprobes, and
2.055 mln/s to 2.174 mln/s (+5.8%) for uretprobes.

We also select TASKS_TRACE_RCU for UPROBES in Kconfig due to the new
dependency.
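For reference, a minimal sketch of the reader/updater pairing this patch
moves to. This is illustrative only, mirroring the calls that appear in the
diff below rather than adding anything to the patch:

	/* Reader: walk the consumer list in a Tasks Trace RCU section. */
	rcu_read_lock_trace();
	list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node,
				rcu_read_lock_trace_held()) {
		/* ... invoke consumer callbacks ... */
	}
	rcu_read_unlock_trace();

	/* Updater: either defer the free past all readers ... */
	call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu);
	/* ... or block until every reader that could still see the old
	 * consumer has finished its read section. */
	synchronize_rcu_tasks_trace();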
Signed-off-by: Andrii Nakryiko Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Oleg Nesterov Link: https://lkml.kernel.org/r/20240910174312.3646590-1-andrii@kernel.org --- arch/Kconfig | 1 + kernel/events/uprobes.c | 38 ++++++++++++++++---------------------- 2 files changed, 17 insertions(+), 22 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index 8af374ea1adc..5b5c21676a0f 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -135,6 +135,7 @@ config KPROBES_ON_FTRACE config UPROBES def_bool n depends on ARCH_SUPPORTS_UPROBES + select TASKS_TRACE_RCU help Uprobes is the user-space counterpart to kprobes: they enable instrumentation applications (such as 'perf probe') diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 4b52cb2ae6d6..5106dc181387 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -26,6 +26,7 @@ #include #include #include +#include #include @@ -42,8 +43,6 @@ static struct rb_root uprobes_tree = RB_ROOT; static DEFINE_RWLOCK(uprobes_treelock); /* serialize rbtree access */ static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock); -DEFINE_STATIC_SRCU(uprobes_srcu); - #define UPROBES_HASH_SZ 13 /* serialize uprobe->pending_list */ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ]; @@ -651,7 +650,7 @@ static void put_uprobe(struct uprobe *uprobe) delayed_uprobe_remove(uprobe, NULL); mutex_unlock(&delayed_uprobe_lock); - call_srcu(&uprobes_srcu, &uprobe->rcu, uprobe_free_rcu); + call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu); } static __always_inline @@ -706,7 +705,7 @@ static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset) struct rb_node *node; unsigned int seq; - lockdep_assert(srcu_read_lock_held(&uprobes_srcu)); + lockdep_assert(rcu_read_lock_trace_held()); do { seq = read_seqcount_begin(&uprobes_seqcount); @@ -934,8 +933,7 @@ static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm) bool ret = false; down_read(&uprobe->consumer_rwsem); - list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node, - srcu_read_lock_held(&uprobes_srcu)) { + list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { ret = consumer_filter(uc, mm); if (ret) break; @@ -1156,7 +1154,7 @@ void uprobe_unregister_sync(void) * unlucky enough caller can free consumer's memory and cause * handler_chain() or handle_uretprobe_chain() to do an use-after-free. */ - synchronize_srcu(&uprobes_srcu); + synchronize_rcu_tasks_trace(); } EXPORT_SYMBOL_GPL(uprobe_unregister_sync); @@ -1240,19 +1238,18 @@ EXPORT_SYMBOL_GPL(uprobe_register); int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add) { struct uprobe_consumer *con; - int ret = -ENOENT, srcu_idx; + int ret = -ENOENT; down_write(&uprobe->register_rwsem); - srcu_idx = srcu_read_lock(&uprobes_srcu); - list_for_each_entry_srcu(con, &uprobe->consumers, cons_node, - srcu_read_lock_held(&uprobes_srcu)) { + rcu_read_lock_trace(); + list_for_each_entry_rcu(con, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { if (con == uc) { ret = register_for_each_vma(uprobe, add ? 
uc : NULL); break; } } - srcu_read_unlock(&uprobes_srcu, srcu_idx); + rcu_read_unlock_trace(); up_write(&uprobe->register_rwsem); @@ -2134,8 +2131,7 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) current->utask->auprobe = &uprobe->arch; - list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node, - srcu_read_lock_held(&uprobes_srcu)) { + list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { int rc = 0; if (uc->handler) { @@ -2173,15 +2169,13 @@ handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs) { struct uprobe *uprobe = ri->uprobe; struct uprobe_consumer *uc; - int srcu_idx; - srcu_idx = srcu_read_lock(&uprobes_srcu); - list_for_each_entry_srcu(uc, &uprobe->consumers, cons_node, - srcu_read_lock_held(&uprobes_srcu)) { + rcu_read_lock_trace(); + list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { if (uc->ret_handler) uc->ret_handler(uc, ri->func, regs); } - srcu_read_unlock(&uprobes_srcu, srcu_idx); + rcu_read_unlock_trace(); } static struct return_instance *find_next_ret_chain(struct return_instance *ri) @@ -2266,13 +2260,13 @@ static void handle_swbp(struct pt_regs *regs) { struct uprobe *uprobe; unsigned long bp_vaddr; - int is_swbp, srcu_idx; + int is_swbp; bp_vaddr = uprobe_get_swbp_addr(regs); if (bp_vaddr == uprobe_get_trampoline_vaddr()) return uprobe_handle_trampoline(regs); - srcu_idx = srcu_read_lock(&uprobes_srcu); + rcu_read_lock_trace(); uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp); if (!uprobe) { @@ -2330,7 +2324,7 @@ static void handle_swbp(struct pt_regs *regs) out: /* arch_uprobe_skip_sstep() succeeded, or restart if can't singlestep */ - srcu_read_unlock(&uprobes_srcu, srcu_idx); + rcu_read_unlock_trace(); } /* From 79390db9eb32b2ba63c6be9fb83f12617259011d Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Tue, 20 Aug 2024 07:38:50 +0000 Subject: [PATCH 02/21] perf/x86: Refine hybrid_pmu_type defination Use macros instead of magic number to define hybrid_pmu_type and remove X86_HYBRID_NUM_PMUS since it's never used. Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Kan Liang Tested-by: Yongwei Ma Link: https://lkml.kernel.org/r/20240820073853.1974746-2-dapeng1.mi@linux.intel.com --- arch/x86/events/perf_event.h | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index ac1182141bf6..fdd7d0369d42 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -674,18 +674,16 @@ enum hybrid_cpu_type { HYBRID_INTEL_CORE = 0x40, }; -enum hybrid_pmu_type { - not_hybrid, - hybrid_small = BIT(0), - hybrid_big = BIT(1), - - hybrid_big_small = hybrid_big | hybrid_small, /* only used for matching */ -}; - #define X86_HYBRID_PMU_ATOM_IDX 0 #define X86_HYBRID_PMU_CORE_IDX 1 -#define X86_HYBRID_NUM_PMUS 2 +enum hybrid_pmu_type { + not_hybrid, + hybrid_small = BIT(X86_HYBRID_PMU_ATOM_IDX), + hybrid_big = BIT(X86_HYBRID_PMU_CORE_IDX), + + hybrid_big_small = hybrid_big | hybrid_small, /* only used for matching */ +}; struct x86_hybrid_pmu { struct pmu pmu; From 2eb2802a41a222bf8d78a88f193ce665071c869e Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Tue, 20 Aug 2024 07:38:51 +0000 Subject: [PATCH 03/21] x86/cpu/intel: Define helper to get CPU core native ID Define helper get_this_hybrid_cpu_native_id() to return the CPU core native ID. This core native ID combining with core type can be used to figure out the CPU core uarch uniquely. 
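As an illustration (not part of this patch), a caller could combine the two
helpers roughly as below. select_skymont_pmu()/select_crestmont_pmu() are
placeholders, and the native-ID constants (0x2 Crestmont, 0x3 Skymont) are
the enum values introduced later in this series:

	u8 type = get_this_hybrid_cpu_type();		 /* CPUID.1AH.EAX[31:24] */
	u32 native_id = get_this_hybrid_cpu_native_id(); /* CPUID.1AH.EAX[23:0]  */

	if (type == HYBRID_INTEL_ATOM) {
		if (native_id == skt_native_id)		/* 0x3: Skymont   */
			select_skymont_pmu();		/* placeholder    */
		else if (native_id == cmt_native_id)	/* 0x2: Crestmont */
			select_crestmont_pmu();		/* placeholder    */
	}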
Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Kan Liang Tested-by: Yongwei Ma Link: https://lkml.kernel.org/r/20240820073853.1974746-3-dapeng1.mi@linux.intel.com --- arch/x86/include/asm/cpu.h | 6 ++++++ arch/x86/kernel/cpu/intel.c | 15 +++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index aa30fd8cad7f..5af69b5be2fb 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -32,6 +32,7 @@ extern bool handle_user_split_lock(struct pt_regs *regs, long error_code); extern bool handle_guest_split_lock(unsigned long ip); extern void handle_bus_lock(struct pt_regs *regs); u8 get_this_hybrid_cpu_type(void); +u32 get_this_hybrid_cpu_native_id(void); #else static inline void __init sld_setup(struct cpuinfo_x86 *c) {} static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code) @@ -50,6 +51,11 @@ static inline u8 get_this_hybrid_cpu_type(void) { return 0; } + +static inline u32 get_this_hybrid_cpu_native_id(void) +{ + return 0; +} #endif #ifdef CONFIG_IA32_FEAT_CTL void init_ia32_feat_ctl(struct cpuinfo_x86 *c); diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index e7656cbef68d..624397e43ac6 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -1299,3 +1299,18 @@ u8 get_this_hybrid_cpu_type(void) return cpuid_eax(0x0000001a) >> X86_HYBRID_CPU_TYPE_ID_SHIFT; } + +/** + * get_this_hybrid_cpu_native_id() - Get the native id of this hybrid CPU + * + * Returns the uarch native ID [23:0] of a CPU in a hybrid processor. + * If the processor is not hybrid, returns 0. + */ +u32 get_this_hybrid_cpu_native_id(void) +{ + if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) + return 0; + + return cpuid_eax(0x0000001a) & + (BIT_ULL(X86_HYBRID_CPU_TYPE_ID_SHIFT) - 1); +} From 9f4a39757c81d532f64232702537c53ad4092a5e Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Tue, 20 Aug 2024 07:38:52 +0000 Subject: [PATCH 04/21] perf/x86/intel: Support hybrid PMU with multiple atom uarchs The upcoming ARL-H hybrid processor contains 2 different atom uarchs which have different PMU capabilities. To distinguish these atom uarchs, CPUID.1AH.EAX[23:0] defines a native model ID which can be used to uniquely identify the uarch of the core by combining with core type. Thus a 3rd hybrid pmu type "hybrid_tiny" is defined to mark the 2nd atom uarch. The helper find_hybrid_pmu_for_cpu() would compare the hybrid pmu type and dynamically read core native id from cpu to identify the corresponding hybrid pmu structure. Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Kan Liang Tested-by: Yongwei Ma Link: https://lkml.kernel.org/r/20240820073853.1974746-4-dapeng1.mi@linux.intel.com --- arch/x86/events/intel/core.c | 28 +++++++++++++++++++--------- arch/x86/events/perf_event.h | 18 +++++++++++++++++- 2 files changed, 36 insertions(+), 10 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index d879478db3f5..6b9884ee96e9 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4924,17 +4924,26 @@ static struct x86_hybrid_pmu *find_hybrid_pmu_for_cpu(void) /* * This essentially just maps between the 'hybrid_cpu_type' - * and 'hybrid_pmu_type' enums: + * and 'hybrid_pmu_type' enums except for ARL-H processor + * which needs to compare atom uarch native id since ARL-H + * contains two different atom uarchs. 
*/ for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { enum hybrid_pmu_type pmu_type = x86_pmu.hybrid_pmu[i].pmu_type; + u32 native_id; - if (cpu_type == HYBRID_INTEL_CORE && - pmu_type == hybrid_big) - return &x86_pmu.hybrid_pmu[i]; - if (cpu_type == HYBRID_INTEL_ATOM && - pmu_type == hybrid_small) + if (cpu_type == HYBRID_INTEL_CORE && pmu_type == hybrid_big) return &x86_pmu.hybrid_pmu[i]; + if (cpu_type == HYBRID_INTEL_ATOM) { + if (x86_pmu.num_hybrid_pmus == 2 && pmu_type == hybrid_small) + return &x86_pmu.hybrid_pmu[i]; + + native_id = get_this_hybrid_cpu_native_id(); + if (native_id == skt_native_id && pmu_type == hybrid_small) + return &x86_pmu.hybrid_pmu[i]; + if (native_id == cmt_native_id && pmu_type == hybrid_tiny) + return &x86_pmu.hybrid_pmu[i]; + } } return NULL; @@ -6238,8 +6247,9 @@ static inline int intel_pmu_v6_addr_offset(int index, bool eventsel) } static const struct { enum hybrid_pmu_type id; char *name; } intel_hybrid_pmu_type_map[] __initconst = { - { hybrid_small, "cpu_atom" }, - { hybrid_big, "cpu_core" }, + { hybrid_small, "cpu_atom" }, + { hybrid_big, "cpu_core" }, + { hybrid_tiny, "cpu_lowpower" }, }; static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus) @@ -6272,7 +6282,7 @@ static __always_inline int intel_pmu_init_hybrid(enum hybrid_pmu_type pmus) 0, x86_pmu_num_counters(&pmu->pmu), 0, 0); pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities; - if (pmu->pmu_type & hybrid_small) { + if (pmu->pmu_type & hybrid_small_tiny) { pmu->intel_cap.perf_metrics = 0; pmu->intel_cap.pebs_output_pt_available = 1; pmu->mid_ack = true; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index fdd7d0369d42..909467d6d7ed 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -668,6 +668,12 @@ enum { #define PERF_PEBS_DATA_SOURCE_GRT_MAX 0x10 #define PERF_PEBS_DATA_SOURCE_GRT_MASK (PERF_PEBS_DATA_SOURCE_GRT_MAX - 1) +/* + * CPUID.1AH.EAX[31:0] uniquely identifies the microarchitecture + * of the core. Bits 31-24 indicates its core type (Core or Atom) + * and Bits [23:0] indicates the native model ID of the core. + * Core type and native model ID are defined in below enumerations. + */ enum hybrid_cpu_type { HYBRID_INTEL_NONE, HYBRID_INTEL_ATOM = 0x20, @@ -676,13 +682,23 @@ enum hybrid_cpu_type { #define X86_HYBRID_PMU_ATOM_IDX 0 #define X86_HYBRID_PMU_CORE_IDX 1 +#define X86_HYBRID_PMU_TINY_IDX 2 enum hybrid_pmu_type { not_hybrid, hybrid_small = BIT(X86_HYBRID_PMU_ATOM_IDX), hybrid_big = BIT(X86_HYBRID_PMU_CORE_IDX), + hybrid_tiny = BIT(X86_HYBRID_PMU_TINY_IDX), - hybrid_big_small = hybrid_big | hybrid_small, /* only used for matching */ + /* The belows are only used for matching */ + hybrid_big_small = hybrid_big | hybrid_small, + hybrid_small_tiny = hybrid_small | hybrid_tiny, + hybrid_big_small_tiny = hybrid_big | hybrid_small_tiny, +}; + +enum atom_native_id { + cmt_native_id = 0x2, /* Crestmont */ + skt_native_id = 0x3, /* Skymont */ }; struct x86_hybrid_pmu { From d3fe6f0a4372702e2cdabf19e03b815811671c7a Mon Sep 17 00:00:00 2001 From: Dapeng Mi Date: Tue, 20 Aug 2024 07:38:53 +0000 Subject: [PATCH 05/21] perf/x86/intel: Add PMU support for ArrowLake-H ArrowLake-H contains 3 different uarchs, LionCove, Skymont and Crestmont. It is different with previous hybrid processors which only contains two kinds of uarchs. 
This patch adds PMU support for ArrowLake-H processor, adds ARL-H specific events which supports the 3 kinds of uarchs, such as td_retiring_arl_h, and extends some existed format attributes like offcore_rsp to make them be available to support ARL-H as well. Althrough these format attributes like offcore_rsp have been extended to support ARL-H, they can still support the regular hybrid platforms with 2 kinds of uarchs since the helper hybrid_format_is_visible() would filter PMU types and only show the format attribute for available PMUs. Signed-off-by: Dapeng Mi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Kan Liang Tested-by: Yongwei Ma Link: https://lkml.kernel.org/r/20240820073853.1974746-5-dapeng1.mi@linux.intel.com --- arch/x86/events/intel/core.c | 105 ++++++++++++++++++++++++++++++++++- arch/x86/events/intel/ds.c | 21 +++++++ arch/x86/events/perf_event.h | 4 ++ 3 files changed, 127 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 6b9884ee96e9..7ca40002a19b 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4599,6 +4599,28 @@ static inline bool erratum_hsw11(struct perf_event *event) X86_CONFIG(.event=0xc0, .umask=0x01); } +static struct event_constraint * +arl_h_get_event_constraints(struct cpu_hw_events *cpuc, int idx, + struct perf_event *event) +{ + struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); + + if (pmu->pmu_type == hybrid_tiny) + return cmt_get_event_constraints(cpuc, idx, event); + + return mtl_get_event_constraints(cpuc, idx, event); +} + +static int arl_h_hw_config(struct perf_event *event) +{ + struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); + + if (pmu->pmu_type == hybrid_tiny) + return intel_pmu_hw_config(event); + + return adl_hw_config(event); +} + /* * The HSW11 requires a period larger than 100 which is the same as the BDM11. * A minimum period of 128 is enforced as well for the INST_RETIRED.ALL. @@ -5974,6 +5996,37 @@ static struct attribute *lnl_hybrid_events_attrs[] = { NULL }; +/* The event string must be in PMU IDX order. 
*/ +EVENT_ATTR_STR_HYBRID(topdown-retiring, + td_retiring_arl_h, + "event=0xc2,umask=0x02;event=0x00,umask=0x80;event=0xc2,umask=0x0", + hybrid_big_small_tiny); +EVENT_ATTR_STR_HYBRID(topdown-bad-spec, + td_bad_spec_arl_h, + "event=0x73,umask=0x0;event=0x00,umask=0x81;event=0x73,umask=0x0", + hybrid_big_small_tiny); +EVENT_ATTR_STR_HYBRID(topdown-fe-bound, + td_fe_bound_arl_h, + "event=0x9c,umask=0x01;event=0x00,umask=0x82;event=0x71,umask=0x0", + hybrid_big_small_tiny); +EVENT_ATTR_STR_HYBRID(topdown-be-bound, + td_be_bound_arl_h, + "event=0xa4,umask=0x02;event=0x00,umask=0x83;event=0x74,umask=0x0", + hybrid_big_small_tiny); + +static struct attribute *arl_h_hybrid_events_attrs[] = { + EVENT_PTR(slots_adl), + EVENT_PTR(td_retiring_arl_h), + EVENT_PTR(td_bad_spec_arl_h), + EVENT_PTR(td_fe_bound_arl_h), + EVENT_PTR(td_be_bound_arl_h), + EVENT_PTR(td_heavy_ops_adl), + EVENT_PTR(td_br_mis_adl), + EVENT_PTR(td_fetch_lat_adl), + EVENT_PTR(td_mem_bound_adl), + NULL, +}; + /* Must be in IDX order */ EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small); EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small); @@ -5992,6 +6045,21 @@ static struct attribute *mtl_hybrid_mem_attrs[] = { NULL }; +EVENT_ATTR_STR_HYBRID(mem-loads, + mem_ld_arl_h, + "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3;event=0xd0,umask=0x5,ldlat=3", + hybrid_big_small_tiny); +EVENT_ATTR_STR_HYBRID(mem-stores, + mem_st_arl_h, + "event=0xd0,umask=0x6;event=0xcd,umask=0x2;event=0xd0,umask=0x6", + hybrid_big_small_tiny); + +static struct attribute *arl_h_hybrid_mem_attrs[] = { + EVENT_PTR(mem_ld_arl_h), + EVENT_PTR(mem_st_arl_h), + NULL, +}; + EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big); EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big); EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big); @@ -6015,8 +6083,8 @@ static struct attribute *adl_hybrid_tsx_attrs[] = { FORMAT_ATTR_HYBRID(in_tx, hybrid_big); FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big); -FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small); -FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small); +FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small_tiny); +FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small_tiny); FORMAT_ATTR_HYBRID(frontend, hybrid_big); #define ADL_HYBRID_RTM_FORMAT_ATTR \ @@ -6039,7 +6107,7 @@ static struct attribute *adl_hybrid_extra_attr[] = { NULL }; -FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small); +FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small_tiny); static struct attribute *mtl_hybrid_extra_attr_rtm[] = { ADL_HYBRID_RTM_FORMAT_ATTR, @@ -7121,6 +7189,37 @@ __init int intel_pmu_init(void) name = "lunarlake_hybrid"; break; + case INTEL_ARROWLAKE_H: + intel_pmu_init_hybrid(hybrid_big_small_tiny); + + x86_pmu.pebs_latency_data = arl_h_latency_data; + x86_pmu.get_event_constraints = arl_h_get_event_constraints; + x86_pmu.hw_config = arl_h_hw_config; + + td_attr = arl_h_hybrid_events_attrs; + mem_attr = arl_h_hybrid_mem_attrs; + tsx_attr = adl_hybrid_tsx_attrs; + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? + mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr; + + /* Initialize big core specific PerfMon capabilities. */ + pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX]; + intel_pmu_init_lnc(&pmu->pmu); + + /* Initialize Atom core specific PerfMon capabilities. 
*/ + pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX]; + intel_pmu_init_skt(&pmu->pmu); + + /* Initialize Lower Power Atom specific PerfMon capabilities. */ + pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX]; + intel_pmu_init_grt(&pmu->pmu); + pmu->extra_regs = intel_cmt_extra_regs; + + intel_pmu_pebs_data_source_arl_h(); + pr_cont("ArrowLake-H Hybrid events, "); + name = "arrowlake_h_hybrid"; + break; + default: switch (x86_pmu.version) { case 1: diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index fa5ea65de0d0..8afc4ad3cd16 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -177,6 +177,17 @@ void __init intel_pmu_pebs_data_source_mtl(void) __intel_pmu_pebs_data_source_cmt(data_source); } +void __init intel_pmu_pebs_data_source_arl_h(void) +{ + u64 *data_source; + + intel_pmu_pebs_data_source_lnl(); + + data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX].pebs_data_source; + memcpy(data_source, pebs_data_source, sizeof(pebs_data_source)); + __intel_pmu_pebs_data_source_cmt(data_source); +} + void __init intel_pmu_pebs_data_source_cmt(void) { __intel_pmu_pebs_data_source_cmt(pebs_data_source); @@ -388,6 +399,16 @@ u64 lnl_latency_data(struct perf_event *event, u64 status) return lnc_latency_data(event, status); } +u64 arl_h_latency_data(struct perf_event *event, u64 status) +{ + struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu); + + if (pmu->pmu_type == hybrid_tiny) + return cmt_latency_data(event, status); + + return lnl_latency_data(event, status); +} + static u64 load_latency_data(struct perf_event *event, u64 status) { union intel_x86_pebs_dse dse; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 909467d6d7ed..82c6f45ce975 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -1592,6 +1592,8 @@ u64 cmt_latency_data(struct perf_event *event, u64 status); u64 lnl_latency_data(struct perf_event *event, u64 status); +u64 arl_h_latency_data(struct perf_event *event, u64 status); + extern struct event_constraint intel_core2_pebs_event_constraints[]; extern struct event_constraint intel_atom_pebs_event_constraints[]; @@ -1711,6 +1713,8 @@ void intel_pmu_pebs_data_source_grt(void); void intel_pmu_pebs_data_source_mtl(void); +void intel_pmu_pebs_data_source_arl_h(void); + void intel_pmu_pebs_data_source_cmt(void); void intel_pmu_pebs_data_source_lnl(void); From b302d5a6fff5dd7ddb1e4752d60c0eaa4cc4f7f3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Sep 2024 16:42:30 +0200 Subject: [PATCH 06/21] uprobes: don't abuse get_utask() in pre_ssout() and prepare_uretprobe() handle_swbp() calls get_utask() before prepare_uretprobe() or pre_ssout() can be called, they can simply use current->utask which can't be NULL. 
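The ordering this relies on, as a simplified sketch of the handle_swbp()
flow described above (not new code in the patch):

	/* handle_swbp(), simplified: utask is set up before either path. */
	if (!get_utask())		/* allocates current->utask if needed */
		goto out;
	/* ... */
	handler_chain(uprobe, regs);	/* may call prepare_uretprobe() */
	/* ... */
	if (pre_ssout(uprobe, regs, bp_vaddr))
		goto out;
	/* both callees run with current->utask guaranteed non-NULL */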
Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240929144230.GA9468@redhat.com --- kernel/events/uprobes.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 5106dc181387..15e91a38d327 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1905,18 +1905,14 @@ static void cleanup_return_instances(struct uprobe_task *utask, bool chained, static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) { - struct return_instance *ri; - struct uprobe_task *utask; + struct uprobe_task *utask = current->utask; unsigned long orig_ret_vaddr, trampoline_vaddr; + struct return_instance *ri; bool chained; if (!get_xol_area()) return; - utask = get_utask(); - if (!utask) - return; - if (utask->depth >= MAX_URETPROBE_DEPTH) { printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to" " nestedness limit pid/tgid=%d/%d\n", @@ -1977,14 +1973,10 @@ fail: static int pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) { - struct uprobe_task *utask; + struct uprobe_task *utask = current->utask; unsigned long xol_vaddr; int err; - utask = get_utask(); - if (!utask) - return -ENOMEM; - if (!try_get_uprobe(uprobe)) return -EINVAL; From c7b4133c48445dde789ed30b19ccb0448c7593f7 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Sep 2024 16:42:35 +0200 Subject: [PATCH 07/21] uprobes: sanitiize xol_free_insn_slot() 1. Clear utask->xol_vaddr unconditionally, even if this addr is not valid, xol_free_insn_slot() should never return with utask->xol_vaddr != NULL. 2. Add a comment to explain why do we need to validate slot_addr. 3. Simplify the validation above. We can simply check offset < PAGE_SIZE, unsigned underflows are fine, it should work if slot_addr < area->vaddr. 4. Kill the unnecessary "slot_nr >= UINSNS_PER_PAGE" check, slot_nr must be valid if offset < PAGE_SIZE. The next patches will cleanup this function even more. Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240929144235.GA9471@redhat.com --- kernel/events/uprobes.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 15e91a38d327..3f38be1e736b 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1680,8 +1680,8 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe) static void xol_free_insn_slot(struct task_struct *tsk) { struct xol_area *area; - unsigned long vma_end; unsigned long slot_addr; + unsigned long offset; if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask) return; @@ -1690,24 +1690,21 @@ static void xol_free_insn_slot(struct task_struct *tsk) if (unlikely(!slot_addr)) return; + tsk->utask->xol_vaddr = 0; area = tsk->mm->uprobes_state.xol_area; - vma_end = area->vaddr + PAGE_SIZE; - if (area->vaddr <= slot_addr && slot_addr < vma_end) { - unsigned long offset; - int slot_nr; - - offset = slot_addr - area->vaddr; - slot_nr = offset / UPROBE_XOL_SLOT_BYTES; - if (slot_nr >= UINSNS_PER_PAGE) - return; + offset = slot_addr - area->vaddr; + /* + * slot_addr must fit into [area->vaddr, area->vaddr + PAGE_SIZE). + * This check can only fail if the "[uprobes]" vma was mremap'ed. 
+ */ + if (offset < PAGE_SIZE) { + int slot_nr = offset / UPROBE_XOL_SLOT_BYTES; clear_bit(slot_nr, area->bitmap); atomic_dec(&area->slot_count); smp_mb__after_atomic(); /* pairs with prepare_to_wait() */ if (waitqueue_active(&area->wq)) wake_up(&area->wq); - - tsk->utask->xol_vaddr = 0; } } From 430af825ba991730f8acc3c804a4aef82e9f7ff6 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Sep 2024 16:42:39 +0200 Subject: [PATCH 08/21] uprobes: kill the unnecessary put_uprobe/xol_free_insn_slot in uprobe_free_utask() If pre_ssout() succeeds and sets utask->active_uprobe and utask->xol_vaddr the task must not exit until it calls handle_singlestep() which does the necessary put_uprobe() and xol_free_insn_slot(). Remove put_uprobe() and xol_free_insn_slot() from uprobe_free_utask(). With this change xol_free_insn_slot() can't hit xol_area/utask/xol_vaddr == NULL, we can kill the unnecessary checks checks and simplify this function more. Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240929144239.GA9475@redhat.com --- kernel/events/uprobes.c | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 3f38be1e736b..03035a859a56 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1673,28 +1673,16 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe) } /* - * xol_free_insn_slot - If slot was earlier allocated by - * @xol_get_insn_slot(), make the slot available for - * subsequent requests. + * xol_free_insn_slot - free the slot allocated by xol_get_insn_slot() */ static void xol_free_insn_slot(struct task_struct *tsk) { - struct xol_area *area; - unsigned long slot_addr; - unsigned long offset; - - if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask) - return; - - slot_addr = tsk->utask->xol_vaddr; - if (unlikely(!slot_addr)) - return; + struct xol_area *area = tsk->mm->uprobes_state.xol_area; + unsigned long offset = tsk->utask->xol_vaddr - area->vaddr; tsk->utask->xol_vaddr = 0; - area = tsk->mm->uprobes_state.xol_area; - offset = slot_addr - area->vaddr; /* - * slot_addr must fit into [area->vaddr, area->vaddr + PAGE_SIZE). + * xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE). * This check can only fail if the "[uprobes]" vma was mremap'ed. */ if (offset < PAGE_SIZE) { @@ -1764,14 +1752,12 @@ void uprobe_free_utask(struct task_struct *t) if (!utask) return; - if (utask->active_uprobe) - put_uprobe(utask->active_uprobe); + WARN_ON_ONCE(utask->active_uprobe || utask->xol_vaddr); ri = utask->return_instances; while (ri) ri = free_ret_instance(ri); - xol_free_insn_slot(t); kfree(utask); t->utask = NULL; } From 6ffe8c7d871b327d16ae6b6f1db4c8ecb0f15c64 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Sep 2024 16:42:44 +0200 Subject: [PATCH 09/21] uprobes: simplify xol_take_insn_slot() and its caller The do / while (slot_nr >= UINSNS_PER_PAGE) loop in xol_take_insn_slot() makes no sense, the checked condition is always true. Change this code to use the "for (;;)" loop, this way we do not need to change slot_nr if test_and_set_bit() fails. Also, kill the unnecessary xol_vaddr != NULL check in xol_get_insn_slot(), xol_take_insn_slot() never returns NULL. 
Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240929144244.GA9480@redhat.com --- kernel/events/uprobes.c | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 03035a859a56..616b5ff3fd85 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1628,25 +1628,20 @@ void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) */ static unsigned long xol_take_insn_slot(struct xol_area *area) { - unsigned long slot_addr; - int slot_nr; + unsigned int slot_nr; - do { + for (;;) { slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE); if (slot_nr < UINSNS_PER_PAGE) { if (!test_and_set_bit(slot_nr, area->bitmap)) break; - - slot_nr = UINSNS_PER_PAGE; continue; } wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE)); - } while (slot_nr >= UINSNS_PER_PAGE); + } - slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES); atomic_inc(&area->slot_count); - - return slot_addr; + return area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES; } /* @@ -1663,12 +1658,8 @@ static unsigned long xol_get_insn_slot(struct uprobe *uprobe) return 0; xol_vaddr = xol_take_insn_slot(area); - if (unlikely(!xol_vaddr)) - return 0; - arch_uprobe_copy_ixol(area->page, xol_vaddr, &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); - return xol_vaddr; } From 1cee988c1d21eabc936d1401811012522083e36f Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Sep 2024 16:42:48 +0200 Subject: [PATCH 10/21] uprobes: move the initialization of utask->xol_vaddr from pre_ssout() to xol_get_insn_slot() This simplifies the code and makes xol_get_insn_slot() symmetric with xol_free_insn_slot() which clears utask->xol_vaddr. Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240929144248.GA9483@redhat.com --- kernel/events/uprobes.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 616b5ff3fd85..dfaca306443d 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1646,21 +1646,19 @@ static unsigned long xol_take_insn_slot(struct xol_area *area) /* * xol_get_insn_slot - allocate a slot for xol. - * Returns the allocated slot address or 0. 
*/ -static unsigned long xol_get_insn_slot(struct uprobe *uprobe) +static bool xol_get_insn_slot(struct uprobe *uprobe) { - struct xol_area *area; - unsigned long xol_vaddr; + struct uprobe_task *utask = current->utask; + struct xol_area *area = get_xol_area(); - area = get_xol_area(); if (!area) - return 0; + return false; - xol_vaddr = xol_take_insn_slot(area); - arch_uprobe_copy_ixol(area->page, xol_vaddr, + utask->xol_vaddr = xol_take_insn_slot(area); + arch_uprobe_copy_ixol(area->page, utask->xol_vaddr, &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); - return xol_vaddr; + return true; } /* @@ -1948,21 +1946,17 @@ static int pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) { struct uprobe_task *utask = current->utask; - unsigned long xol_vaddr; int err; if (!try_get_uprobe(uprobe)) return -EINVAL; - xol_vaddr = xol_get_insn_slot(uprobe); - if (!xol_vaddr) { + if (!xol_get_insn_slot(uprobe)) { err = -ENOMEM; goto err_out; } - utask->xol_vaddr = xol_vaddr; utask->vaddr = bp_vaddr; - err = arch_uprobe_pre_xol(&uprobe->arch, regs); if (unlikely(err)) { xol_free_insn_slot(current); From c5356ab1db28cafc448a50c26ba84442237abb98 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Sep 2024 16:42:53 +0200 Subject: [PATCH 11/21] uprobes: pass utask to xol_get_insn_slot() and xol_free_insn_slot() Add the "struct uprobe_task *utask" argument to xol_get_insn_slot() and xol_free_insn_slot(), their callers already have it so we can avoid the unnecessary dereference and simplify the code. Kill the "tsk" argument of xol_free_insn_slot(), it is always current. Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240929144253.GA9487@redhat.com --- kernel/events/uprobes.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index dfaca306443d..c9f1e1e56b15 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1647,9 +1647,8 @@ static unsigned long xol_take_insn_slot(struct xol_area *area) /* * xol_get_insn_slot - allocate a slot for xol. */ -static bool xol_get_insn_slot(struct uprobe *uprobe) +static bool xol_get_insn_slot(struct uprobe *uprobe, struct uprobe_task *utask) { - struct uprobe_task *utask = current->utask; struct xol_area *area = get_xol_area(); if (!area) @@ -1664,12 +1663,12 @@ static bool xol_get_insn_slot(struct uprobe *uprobe) /* * xol_free_insn_slot - free the slot allocated by xol_get_insn_slot() */ -static void xol_free_insn_slot(struct task_struct *tsk) +static void xol_free_insn_slot(struct uprobe_task *utask) { - struct xol_area *area = tsk->mm->uprobes_state.xol_area; - unsigned long offset = tsk->utask->xol_vaddr - area->vaddr; + struct xol_area *area = current->mm->uprobes_state.xol_area; + unsigned long offset = utask->xol_vaddr - area->vaddr; - tsk->utask->xol_vaddr = 0; + utask->xol_vaddr = 0; /* * xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE). * This check can only fail if the "[uprobes]" vma was mremap'ed. 
@@ -1951,7 +1950,7 @@ pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) if (!try_get_uprobe(uprobe)) return -EINVAL; - if (!xol_get_insn_slot(uprobe)) { + if (!xol_get_insn_slot(uprobe, utask)) { err = -ENOMEM; goto err_out; } @@ -1959,7 +1958,7 @@ pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr) utask->vaddr = bp_vaddr; err = arch_uprobe_pre_xol(&uprobe->arch, regs); if (unlikely(err)) { - xol_free_insn_slot(current); + xol_free_insn_slot(utask); goto err_out; } @@ -2307,7 +2306,7 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs) put_uprobe(uprobe); utask->active_uprobe = NULL; utask->state = UTASK_RUNNING; - xol_free_insn_slot(current); + xol_free_insn_slot(utask); spin_lock_irq(¤t->sighand->siglock); recalc_sigpending(); /* see uprobe_deny_signal() */ From c16e2fdd746c78f5b2ce3c2ab8a26a61b6ed09e5 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 29 Sep 2024 16:42:58 +0200 Subject: [PATCH 12/21] uprobes: deny mremap(xol_vma) kernel/events/uprobes.c assumes that xol_area->vaddr is always correct but a malicious application can remap its "[uprobes]" vma to another adress to confuse the kernel. Introduce xol_mremap() to make this impossible. With this change utask->xol_vaddr in xol_free_insn_slot() can't be invalid, we can turn the offset check into WARN_ON_ONCE(offset >= PAGE_SIZE). Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240929144258.GA9492@redhat.com --- kernel/events/uprobes.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index c9f1e1e56b15..d3538b6c0831 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1472,9 +1472,15 @@ static vm_fault_t xol_fault(const struct vm_special_mapping *sm, return 0; } +static int xol_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) +{ + return -EPERM; +} + static const struct vm_special_mapping xol_mapping = { .name = "[uprobes]", .fault = xol_fault, + .mremap = xol_mremap, }; /* Slot allocation for XOL */ @@ -1667,21 +1673,19 @@ static void xol_free_insn_slot(struct uprobe_task *utask) { struct xol_area *area = current->mm->uprobes_state.xol_area; unsigned long offset = utask->xol_vaddr - area->vaddr; + unsigned int slot_nr; utask->xol_vaddr = 0; - /* - * xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE). - * This check can only fail if the "[uprobes]" vma was mremap'ed. - */ - if (offset < PAGE_SIZE) { - int slot_nr = offset / UPROBE_XOL_SLOT_BYTES; + /* xol_vaddr must fit into [area->vaddr, area->vaddr + PAGE_SIZE) */ + if (WARN_ON_ONCE(offset >= PAGE_SIZE)) + return; - clear_bit(slot_nr, area->bitmap); - atomic_dec(&area->slot_count); - smp_mb__after_atomic(); /* pairs with prepare_to_wait() */ - if (waitqueue_active(&area->wq)) - wake_up(&area->wq); - } + slot_nr = offset / UPROBE_XOL_SLOT_BYTES; + clear_bit(slot_nr, area->bitmap); + atomic_dec(&area->slot_count); + smp_mb__after_atomic(); /* pairs with prepare_to_wait() */ + if (waitqueue_active(&area->wq)) + wake_up(&area->wq); } void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, From 7a166094bd2b1c084fd215747f9cd05a853d66c9 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 1 Oct 2024 16:24:59 +0200 Subject: [PATCH 13/21] uprobes: kill xol_area->slot_count Add the new helper, xol_get_slot_nr() which does find_first_zero_bit() + test_and_set_bit(). 
xol_take_insn_slot() can wait for the "xol_get_slot_nr() < UINSNS_PER_PAGE" event instead of "area->slot_count < UINSNS_PER_PAGE". So we can kill area->slot_count and avoid atomic_inc() + atomic_dec(), this simplifies the code and can slightly improve the performance. Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20241001142458.GA13629@redhat.com --- kernel/events/uprobes.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index d3538b6c0831..a1c801e8333c 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -99,7 +99,6 @@ static LIST_HEAD(delayed_uprobe_list); */ struct xol_area { wait_queue_head_t wq; /* if all slots are busy */ - atomic_t slot_count; /* number of in-use slots */ unsigned long *bitmap; /* 0 = free slot */ struct page *page; @@ -1556,7 +1555,6 @@ static struct xol_area *__create_xol_area(unsigned long vaddr) init_waitqueue_head(&area->wq); /* Reserve the 1st slot for get_trampoline_vaddr() */ set_bit(0, area->bitmap); - atomic_set(&area->slot_count, 1); insns = arch_uprobe_trampoline(&insns_size); arch_uprobe_copy_ixol(area->page, 0, insns, insns_size); @@ -1629,24 +1627,28 @@ void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) } } +static unsigned long xol_get_slot_nr(struct xol_area *area) +{ + unsigned long slot_nr; + + slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE); + if (slot_nr < UINSNS_PER_PAGE) { + if (!test_and_set_bit(slot_nr, area->bitmap)) + return slot_nr; + } + + return UINSNS_PER_PAGE; +} + /* * - search for a free slot. */ static unsigned long xol_take_insn_slot(struct xol_area *area) { - unsigned int slot_nr; + unsigned long slot_nr; - for (;;) { - slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE); - if (slot_nr < UINSNS_PER_PAGE) { - if (!test_and_set_bit(slot_nr, area->bitmap)) - break; - continue; - } - wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE)); - } + wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE); - atomic_inc(&area->slot_count); return area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES; } @@ -1682,7 +1684,6 @@ static void xol_free_insn_slot(struct uprobe_task *utask) slot_nr = offset / UPROBE_XOL_SLOT_BYTES; clear_bit(slot_nr, area->bitmap); - atomic_dec(&area->slot_count); smp_mb__after_atomic(); /* pairs with prepare_to_wait() */ if (waitqueue_active(&area->wq)) wake_up(&area->wq); From 6c74ca7aa81a23c613b8ca52bfe0a4b3734dd287 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 1 Oct 2024 16:25:03 +0200 Subject: [PATCH 14/21] uprobes: fold xol_take_insn_slot() into xol_get_insn_slot() After the previous change xol_take_insn_slot() becomes trivial, kill it. Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20241001142503.GA13633@redhat.com --- kernel/events/uprobes.c | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index a1c801e8333c..2a0059464383 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1640,29 +1640,20 @@ static unsigned long xol_get_slot_nr(struct xol_area *area) return UINSNS_PER_PAGE; } -/* - * - search for a free slot. 
- */ -static unsigned long xol_take_insn_slot(struct xol_area *area) -{ - unsigned long slot_nr; - - wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE); - - return area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES; -} - /* * xol_get_insn_slot - allocate a slot for xol. */ static bool xol_get_insn_slot(struct uprobe *uprobe, struct uprobe_task *utask) { struct xol_area *area = get_xol_area(); + unsigned long slot_nr; if (!area) return false; - utask->xol_vaddr = xol_take_insn_slot(area); + wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE); + + utask->xol_vaddr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES; arch_uprobe_copy_ixol(area->page, utask->xol_vaddr, &uprobe->arch.ixol, sizeof(uprobe->arch.ixol)); return true; From de20037e1b3c2f2ca97b8c12b8c7bca8abd509a7 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Tue, 1 Oct 2024 07:10:19 -0700 Subject: [PATCH 15/21] perf/x86/amd: Warn only on new bits set Warning at every leaking bits can cause a flood of message, triggering various stall-warning mechanisms to fire, including CSD locks, which makes the machine to be unusable. Track the bits that are being leaked, and only warn when a new bit is set. That said, this patch will help with the following issues: 1) It will tell us which bits are being set, so, it is easy to communicate it back to vendor, and to do a root-cause analyzes. 2) It avoid the machine to be unusable, because, worst case scenario, the user gets less than 60 WARNs (one per unhandled bit). Suggested-by: Paul E. McKenney Signed-off-by: Breno Leitao Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Sandipan Das Reviewed-by: Paul E. McKenney Link: https://lkml.kernel.org/r/20241001141020.2620361-1-leitao@debian.org --- arch/x86/events/amd/core.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 920e3a640cad..b4a1a2576510 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -943,11 +943,12 @@ static int amd_pmu_v2_snapshot_branch_stack(struct perf_branch_entry *entries, u static int amd_pmu_v2_handle_irq(struct pt_regs *regs) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + static atomic64_t status_warned = ATOMIC64_INIT(0); + u64 reserved, status, mask, new_bits, prev_bits; struct perf_sample_data data; struct hw_perf_event *hwc; struct perf_event *event; int handled = 0, idx; - u64 reserved, status, mask; bool pmu_enabled; /* @@ -1012,7 +1013,12 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) * the corresponding PMCs are expected to be inactive according to the * active_mask */ - WARN_ON(status > 0); + if (status > 0) { + prev_bits = atomic64_fetch_or(status, &status_warned); + // A new bit was set for the very first time. + new_bits = status & ~prev_bits; + WARN(new_bits, "New overflows for inactive PMCs: %llx\n", new_bits); + } /* Clear overflow and freeze bits */ amd_pmu_ack_global_status(~status); From da09a9e0c3eab164af950be44ee6bdea8527c3e5 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 18 Oct 2024 22:22:51 +0200 Subject: [PATCH 16/21] uprobe: Add data pointer to consumer handlers Adding data pointer to both entry and exit consumer handlers and all its users. The functionality itself is coming in following change. 
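For illustration, after this signature change a consumer looks roughly like
the sketch below (my_handler/my_ret_handler/my_consumer are placeholder
names, not part of the patch):

	static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs,
			      __u64 *data)
	{
		/* 'data' is unused for now; the following change wires it up. */
		return 0;
	}

	static int my_ret_handler(struct uprobe_consumer *self, unsigned long func,
				  struct pt_regs *regs, __u64 *data)
	{
		return 0;
	}

	static struct uprobe_consumer my_consumer = {
		.handler	= my_handler,
		.ret_handler	= my_ret_handler,
	};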
Signed-off-by: Jiri Olsa Signed-off-by: Peter Zijlstra (Intel) Acked-by: Oleg Nesterov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20241018202252.693462-2-jolsa@kernel.org --- include/linux/uprobes.h | 4 ++-- kernel/events/uprobes.c | 4 ++-- kernel/trace/bpf_trace.c | 6 ++++-- kernel/trace/trace_uprobe.c | 12 ++++++++---- .../testing/selftests/bpf/bpf_testmod/bpf_testmod.c | 2 +- 5 files changed, 17 insertions(+), 11 deletions(-) diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 2b294bf1881f..bb265a632b91 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -37,10 +37,10 @@ struct uprobe_consumer { * for the current process. If filter() is omitted or returns true, * UPROBE_HANDLER_REMOVE is effectively ignored. */ - int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs); + int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data); int (*ret_handler)(struct uprobe_consumer *self, unsigned long func, - struct pt_regs *regs); + struct pt_regs *regs, __u64 *data); bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm); struct list_head cons_node; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 2a0059464383..6b44c386a5df 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -2090,7 +2090,7 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) int rc = 0; if (uc->handler) { - rc = uc->handler(uc, regs); + rc = uc->handler(uc, regs, NULL); WARN(rc & ~UPROBE_HANDLER_MASK, "bad rc=0x%x from %ps()\n", rc, uc->handler); } @@ -2128,7 +2128,7 @@ handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs) rcu_read_lock_trace(); list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { if (uc->ret_handler) - uc->ret_handler(uc, ri->func, regs); + uc->ret_handler(uc, ri->func, regs, NULL); } rcu_read_unlock_trace(); } diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index a582cd25ca87..fdab7ecd8dfa 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -3244,7 +3244,8 @@ uprobe_multi_link_filter(struct uprobe_consumer *con, struct mm_struct *mm) } static int -uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs) +uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs, + __u64 *data) { struct bpf_uprobe *uprobe; @@ -3253,7 +3254,8 @@ uprobe_multi_link_handler(struct uprobe_consumer *con, struct pt_regs *regs) } static int -uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs) +uprobe_multi_link_ret_handler(struct uprobe_consumer *con, unsigned long func, struct pt_regs *regs, + __u64 *data) { struct bpf_uprobe *uprobe; diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index c40531d2cbad..5895eabe3581 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -89,9 +89,11 @@ static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev) static int register_uprobe_event(struct trace_uprobe *tu); static int unregister_uprobe_event(struct trace_uprobe *tu); -static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs); +static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs, + __u64 *data); static int uretprobe_dispatcher(struct uprobe_consumer *con, - unsigned long func, struct pt_regs *regs); + unsigned long func, struct pt_regs *regs, + __u64 *data); #ifdef CONFIG_STACK_GROWSUP static unsigned long 
adjust_stack_addr(unsigned long addr, unsigned int n) @@ -1517,7 +1519,8 @@ trace_uprobe_register(struct trace_event_call *event, enum trace_reg type, } } -static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) +static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs, + __u64 *data) { struct trace_uprobe *tu; struct uprobe_dispatch_data udd; @@ -1548,7 +1551,8 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) } static int uretprobe_dispatcher(struct uprobe_consumer *con, - unsigned long func, struct pt_regs *regs) + unsigned long func, struct pt_regs *regs, + __u64 *data) { struct trace_uprobe *tu; struct uprobe_dispatch_data udd; diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 8835761d9a12..12005e3dc3e4 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -461,7 +461,7 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { static int uprobe_ret_handler(struct uprobe_consumer *self, unsigned long func, - struct pt_regs *regs) + struct pt_regs *regs, __u64 *data) { regs->ax = 0x12345678deadbeef; From 4d756095d3994cb41393817dc696b458938a6bd0 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Fri, 18 Oct 2024 22:22:52 +0200 Subject: [PATCH 17/21] uprobe: Add support for session consumer This change allows the uprobe consumer to behave as session which means that 'handler' and 'ret_handler' callbacks are connected in a way that allows to: - control execution of 'ret_handler' from 'handler' callback - share data between 'handler' and 'ret_handler' callbacks The session concept fits to our common use case where we do filtering on entry uprobe and based on the result we decide to run the return uprobe (or not). It's also convenient to share the data between session callbacks. To achive this we are adding new return value the uprobe consumer can return from 'handler' callback: UPROBE_HANDLER_IGNORE - Ignore 'ret_handler' callback for this consumer. And store cookie and pass it to 'ret_handler' when consumer has both 'handler' and 'ret_handler' callbacks defined. We store shared data in the return_consumer object array as part of the return_instance object. This way the handle_uretprobe_chain can find related return_consumer and its shared data. We also store entry handler return value, for cases when there are multiple consumers on single uprobe and some of them are ignored and some of them not, in which case the return probe gets installed and we need to have a way to find out which consumer needs to be ignored. The tricky part is when consumer is registered 'after' the uprobe entry handler is hit. In such case this consumer's 'ret_handler' gets executed as well, but it won't have the proper data pointer set, so we can filter it out. 
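Putting the pieces together, a session-style consumer under the new scheme
might look roughly like this sketch. should_trace() and record_result() are
placeholders, not kernel APIs; the cookie is the __u64 slot the uprobe core
stores in the return_instance:

	static int my_entry(struct uprobe_consumer *self, struct pt_regs *regs,
			    __u64 *cookie)
	{
		if (!should_trace(regs))		/* placeholder filter */
			return UPROBE_HANDLER_IGNORE;	/* skip my_exit for this hit */

		*cookie = instruction_pointer(regs);	/* shared with my_exit */
		return 0;
	}

	static int my_exit(struct uprobe_consumer *self, unsigned long func,
			   struct pt_regs *regs, __u64 *cookie)
	{
		/*
		 * For a consumer with both callbacks set, the core invokes
		 * this only when the cookie stored by my_entry() is available.
		 */
		record_result(func, *cookie);		/* placeholder */
		return 0;
	}

	static struct uprobe_consumer my_session_consumer = {
		.handler	= my_entry,
		.ret_handler	= my_exit,
	};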
Suggested-by: Oleg Nesterov Signed-off-by: Jiri Olsa Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Oleg Nesterov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20241018202252.693462-3-jolsa@kernel.org --- include/linux/uprobes.h | 21 +++++- kernel/events/uprobes.c | 148 ++++++++++++++++++++++++++++++++-------- 2 files changed, 139 insertions(+), 30 deletions(-) diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index bb265a632b91..dbaf04189548 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -23,8 +23,17 @@ struct inode; struct notifier_block; struct page; +/* + * Allowed return values from uprobe consumer's handler callback + * with following meaning: + * + * UPROBE_HANDLER_REMOVE + * - Remove the uprobe breakpoint from current->mm. + * UPROBE_HANDLER_IGNORE + * - Ignore ret_handler callback for this consumer. + */ #define UPROBE_HANDLER_REMOVE 1 -#define UPROBE_HANDLER_MASK 1 +#define UPROBE_HANDLER_IGNORE 2 #define MAX_URETPROBE_DEPTH 64 @@ -44,6 +53,8 @@ struct uprobe_consumer { bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm); struct list_head cons_node; + + __u64 id; /* set when uprobe_consumer is registered */ }; #ifdef CONFIG_UPROBES @@ -83,14 +94,22 @@ struct uprobe_task { unsigned int depth; }; +struct return_consumer { + __u64 cookie; + __u64 id; +}; + struct return_instance { struct uprobe *uprobe; unsigned long func; unsigned long stack; /* stack pointer */ unsigned long orig_ret_vaddr; /* original return address */ bool chained; /* true, if instance is nested */ + int consumers_cnt; struct return_instance *next; /* keep as stack */ + + struct return_consumer consumers[] __counted_by(consumers_cnt); }; enum rp_check { diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 6b44c386a5df..4ef4b51776eb 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -64,7 +64,7 @@ struct uprobe { struct rcu_head rcu; loff_t offset; loff_t ref_ctr_offset; - unsigned long flags; + unsigned long flags; /* "unsigned long" so bitops work */ /* * The generic code assumes that it has two members of unknown type @@ -823,8 +823,11 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset, static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc) { + static atomic64_t id; + down_write(&uprobe->consumer_rwsem); list_add_rcu(&uc->cons_node, &uprobe->consumers); + uc->id = (__u64) atomic64_inc_return(&id); up_write(&uprobe->consumer_rwsem); } @@ -1761,6 +1764,34 @@ static struct uprobe_task *get_utask(void) return current->utask; } +static size_t ri_size(int consumers_cnt) +{ + struct return_instance *ri; + + return sizeof(*ri) + sizeof(ri->consumers[0]) * consumers_cnt; +} + +#define DEF_CNT 4 + +static struct return_instance *alloc_return_instance(void) +{ + struct return_instance *ri; + + ri = kzalloc(ri_size(DEF_CNT), GFP_KERNEL); + if (!ri) + return ZERO_SIZE_PTR; + + ri->consumers_cnt = DEF_CNT; + return ri; +} + +static struct return_instance *dup_return_instance(struct return_instance *old) +{ + size_t size = ri_size(old->consumers_cnt); + + return kmemdup(old, size, GFP_KERNEL); +} + static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) { struct uprobe_task *n_utask; @@ -1773,11 +1804,10 @@ static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) p = &n_utask->return_instances; for (o = o_utask->return_instances; o; o = o->next) { - n = kmalloc(sizeof(struct return_instance), GFP_KERNEL); + n = dup_return_instance(o); if (!n) return 
-ENOMEM; - *n = *o; /* * uprobe's refcnt has to be positive at this point, kept by * utask->return_instances items; return_instances can't be @@ -1870,35 +1900,31 @@ static void cleanup_return_instances(struct uprobe_task *utask, bool chained, utask->return_instances = ri; } -static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) +static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, + struct return_instance *ri) { struct uprobe_task *utask = current->utask; unsigned long orig_ret_vaddr, trampoline_vaddr; - struct return_instance *ri; bool chained; if (!get_xol_area()) - return; + goto free; if (utask->depth >= MAX_URETPROBE_DEPTH) { printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to" " nestedness limit pid/tgid=%d/%d\n", current->pid, current->tgid); - return; + goto free; } /* we need to bump refcount to store uprobe in utask */ if (!try_get_uprobe(uprobe)) - return; - - ri = kmalloc(sizeof(struct return_instance), GFP_KERNEL); - if (!ri) - goto fail; + goto free; trampoline_vaddr = uprobe_get_trampoline_vaddr(); orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); if (orig_ret_vaddr == -1) - goto fail; + goto put; /* drop the entries invalidated by longjmp() */ chained = (orig_ret_vaddr == trampoline_vaddr); @@ -1916,7 +1942,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) * attack from user-space. */ uprobe_warn(current, "handle tail call"); - goto fail; + goto put; } orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; } @@ -1931,9 +1957,10 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs) utask->return_instances = ri; return; -fail: - kfree(ri); +put: put_uprobe(uprobe); +free: + kfree(ri); } /* Prepare to single-step probed instruction out of line. 
*/ @@ -2077,34 +2104,90 @@ static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swb return uprobe; } +static struct return_instance* +push_consumer(struct return_instance *ri, int idx, __u64 id, __u64 cookie) +{ + if (unlikely(ri == ZERO_SIZE_PTR)) + return ri; + + if (unlikely(idx >= ri->consumers_cnt)) { + struct return_instance *old_ri = ri; + + ri->consumers_cnt += DEF_CNT; + ri = krealloc(old_ri, ri_size(old_ri->consumers_cnt), GFP_KERNEL); + if (!ri) { + kfree(old_ri); + return ZERO_SIZE_PTR; + } + } + + ri->consumers[idx].id = id; + ri->consumers[idx].cookie = cookie; + return ri; +} + +static struct return_consumer * +return_consumer_find(struct return_instance *ri, int *iter, int id) +{ + struct return_consumer *ric; + int idx = *iter; + + for (ric = &ri->consumers[idx]; idx < ri->consumers_cnt; idx++, ric++) { + if (ric->id == id) { + *iter = idx + 1; + return ric; + } + } + return NULL; +} + +static bool ignore_ret_handler(int rc) +{ + return rc == UPROBE_HANDLER_REMOVE || rc == UPROBE_HANDLER_IGNORE; +} + static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) { struct uprobe_consumer *uc; - int remove = UPROBE_HANDLER_REMOVE; - bool need_prep = false; /* prepare return uprobe, when needed */ - bool has_consumers = false; + bool has_consumers = false, remove = true; + struct return_instance *ri = NULL; + int push_idx = 0; current->utask->auprobe = &uprobe->arch; list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { + bool session = uc->handler && uc->ret_handler; + __u64 cookie = 0; int rc = 0; if (uc->handler) { - rc = uc->handler(uc, regs, NULL); - WARN(rc & ~UPROBE_HANDLER_MASK, + rc = uc->handler(uc, regs, &cookie); + WARN(rc < 0 || rc > 2, "bad rc=0x%x from %ps()\n", rc, uc->handler); } - if (uc->ret_handler) - need_prep = true; - - remove &= rc; + remove &= rc == UPROBE_HANDLER_REMOVE; has_consumers = true; + + if (!uc->ret_handler || ignore_ret_handler(rc)) + continue; + + if (!ri) + ri = alloc_return_instance(); + + if (session) + ri = push_consumer(ri, push_idx++, uc->id, cookie); } current->utask->auprobe = NULL; - if (need_prep && !remove) - prepare_uretprobe(uprobe, regs); /* put bp at return */ + if (!ZERO_OR_NULL_PTR(ri)) { + /* + * The push_idx value has the final number of return consumers, + * and ri->consumers_cnt has number of allocated consumers. + */ + ri->consumers_cnt = push_idx; + prepare_uretprobe(uprobe, regs, ri); + } if (remove && has_consumers) { down_read(&uprobe->register_rwsem); @@ -2123,12 +2206,19 @@ static void handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs) { struct uprobe *uprobe = ri->uprobe; + struct return_consumer *ric; struct uprobe_consumer *uc; + int ric_idx = 0; rcu_read_lock_trace(); list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { - if (uc->ret_handler) - uc->ret_handler(uc, ri->func, regs, NULL); + bool session = uc->handler && uc->ret_handler; + + if (uc->ret_handler) { + ric = return_consumer_find(ri, &ric_idx, uc->id); + if (!session || ric) + uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL); + } } rcu_read_unlock_trace(); } From 9b99d65c0bb4e37013bc2ec9c32b78c5751ff952 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 10 Oct 2024 07:26:03 -0700 Subject: [PATCH 18/21] perf/x86/rapl: Move the pmu allocation out of CPU hotplug There are extra codes in the CPU hotplug function to allocate rapl pmus. The generic PMU hotplug support is hard to be applied. 
As long as the rapl pmus can be allocated upfront for each die/socket, the code doesn't need to be implemented in the CPU hotplug function. Move the code to init_rapl_pmus(), and allocate a PMU for each possible die/socket. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Oliver Sang Link: https://lore.kernel.org/r/20241010142604.770192-1-kan.liang@linux.intel.com --- arch/x86/events/rapl.c | 44 ++++++++++++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c index a481a939862e..86cbee1a8bf0 100644 --- a/arch/x86/events/rapl.c +++ b/arch/x86/events/rapl.c @@ -602,19 +602,8 @@ static int rapl_cpu_online(unsigned int cpu) struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); int target; - if (!pmu) { - pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); - if (!pmu) - return -ENOMEM; - - raw_spin_lock_init(&pmu->lock); - INIT_LIST_HEAD(&pmu->active_list); - pmu->pmu = &rapl_pmus->pmu; - pmu->timer_interval = ms_to_ktime(rapl_timer_ms); - rapl_hrtimer_init(pmu); - - rapl_pmus->pmus[rapl_pmu_idx] = pmu; - } + if (!pmu) + return -ENOMEM; /* * Check if there is an online cpu in the package which collects rapl @@ -707,6 +696,32 @@ static const struct attribute_group *rapl_attr_update[] = { NULL, }; +static int __init init_rapl_pmu(void) +{ + struct rapl_pmu *pmu; + int idx; + + for (idx = 0; idx < rapl_pmus->nr_rapl_pmu; idx++) { + pmu = kzalloc(sizeof(*pmu), GFP_KERNEL); + if (!pmu) + goto free; + + raw_spin_lock_init(&pmu->lock); + INIT_LIST_HEAD(&pmu->active_list); + pmu->pmu = &rapl_pmus->pmu; + pmu->timer_interval = ms_to_ktime(rapl_timer_ms); + rapl_hrtimer_init(pmu); + + rapl_pmus->pmus[idx] = pmu; + } + + return 0; +free: + for (; idx > 0; idx--) + kfree(rapl_pmus->pmus[idx - 1]); + return -ENOMEM; +} + static int __init init_rapl_pmus(void) { int nr_rapl_pmu = topology_max_packages(); @@ -730,7 +745,8 @@ static int __init init_rapl_pmus(void) rapl_pmus->pmu.read = rapl_pmu_event_read; rapl_pmus->pmu.module = THIS_MODULE; rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; - return 0; + + return init_rapl_pmu(); } static struct rapl_model model_snb = { From 9e9af8bbb5f9b565b9faf691f96f661791e199b0 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 10 Oct 2024 07:26:04 -0700 Subject: [PATCH 19/21] perf/x86/rapl: Clean up cpumask and hotplug The rapl PMU is die scoped, which is now supported by the generic perf_event subsystem. Set the scope for the rapl PMU and remove all the cpumask and hotplug code.
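For reference, here is a minimal sketch of what a driver-side PMU looks like once it leans on the core's scope handling instead of carrying its own cpumask attribute and hotplug callbacks. This is not code from the patch; the "toy" PMU name and its stub callbacks are made up, and only the .scope assignment and the registration call matter for the point being made:

#include <linux/module.h>
#include <linux/perf_event.h>

static struct pmu toy_pmu;

static int toy_event_init(struct perf_event *event)
{
	/* only accept events of our registered type */
	return event->attr.type == toy_pmu.type ? 0 : -ENOENT;
}

static void toy_event_start(struct perf_event *event, int flags) { }
static void toy_event_stop(struct perf_event *event, int flags) { }
static void toy_event_read(struct perf_event *event) { }

static int toy_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		toy_event_start(event, flags);
	return 0;
}

static void toy_event_del(struct perf_event *event, int flags)
{
	toy_event_stop(event, PERF_EF_UPDATE);
}

static struct pmu toy_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	/* core picks one reader CPU per die and migrates events on hotplug */
	.scope		= PERF_PMU_SCOPE_DIE,
	.event_init	= toy_event_init,
	.add		= toy_event_add,
	.del		= toy_event_del,
	.start		= toy_event_start,
	.stop		= toy_event_stop,
	.read		= toy_event_read,
	.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	.module		= THIS_MODULE,
};

static int __init toy_init(void)
{
	/* no cpuhp_setup_state() and no cpumask sysfs attribute needed */
	return perf_pmu_register(&toy_pmu, "toy", -1);
}
module_init(toy_init);

static void __exit toy_exit(void)
{
	perf_pmu_unregister(&toy_pmu);
}
module_exit(toy_exit);
MODULE_LICENSE("GPL");

With .scope set, perf core selects an online CPU per die to read events on and migrates events when that CPU goes offline, which is what the removed rapl_cpu_online()/rapl_cpu_offline() callbacks used to do by hand.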
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Oliver Sang Tested-by: Dhananjay Ugwekar Link: https://lore.kernel.org/r/20241010142604.770192-2-kan.liang@linux.intel.com --- arch/x86/events/rapl.c | 90 +++----------------------------------- include/linux/cpuhotplug.h | 1 - 2 files changed, 6 insertions(+), 85 deletions(-) diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c index 86cbee1a8bf0..a8defc813c36 100644 --- a/arch/x86/events/rapl.c +++ b/arch/x86/events/rapl.c @@ -148,7 +148,6 @@ struct rapl_model { /* 1/2^hw_unit Joule */ static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly; static struct rapl_pmus *rapl_pmus; -static cpumask_t rapl_cpu_mask; static unsigned int rapl_cntr_mask; static u64 rapl_timer_ms; static struct perf_msr *rapl_msrs; @@ -369,8 +368,6 @@ static int rapl_pmu_event_init(struct perf_event *event) if (event->cpu < 0) return -EINVAL; - event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; - if (!cfg || cfg >= NR_RAPL_DOMAINS + 1) return -EINVAL; @@ -389,7 +386,6 @@ static int rapl_pmu_event_init(struct perf_event *event) pmu = cpu_to_rapl_pmu(event->cpu); if (!pmu) return -EINVAL; - event->cpu = pmu->cpu; event->pmu_private = pmu; event->hw.event_base = rapl_msrs[bit].msr; event->hw.config = cfg; @@ -403,23 +399,6 @@ static void rapl_pmu_event_read(struct perf_event *event) rapl_event_update(event); } -static ssize_t rapl_get_attr_cpumask(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask); -} - -static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL); - -static struct attribute *rapl_pmu_attrs[] = { - &dev_attr_cpumask.attr, - NULL, -}; - -static struct attribute_group rapl_pmu_attr_group = { - .attrs = rapl_pmu_attrs, -}; - RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02"); RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03"); @@ -467,7 +446,6 @@ static struct attribute_group rapl_pmu_format_group = { }; static const struct attribute_group *rapl_attr_groups[] = { - &rapl_pmu_attr_group, &rapl_pmu_format_group, &rapl_pmu_events_group, NULL, @@ -570,54 +548,6 @@ static struct perf_msr amd_rapl_msrs[] = { [PERF_RAPL_PSYS] = { 0, &rapl_events_psys_group, NULL, false, 0 }, }; -static int rapl_cpu_offline(unsigned int cpu) -{ - struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); - int target; - - /* Check if exiting cpu is used for collecting rapl events */ - if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask)) - return 0; - - pmu->cpu = -1; - /* Find a new cpu to collect rapl events */ - target = cpumask_any_but(get_rapl_pmu_cpumask(cpu), cpu); - - /* Migrate rapl events to the new target */ - if (target < nr_cpu_ids) { - cpumask_set_cpu(target, &rapl_cpu_mask); - pmu->cpu = target; - perf_pmu_migrate_context(pmu->pmu, cpu, target); - } - return 0; -} - -static int rapl_cpu_online(unsigned int cpu) -{ - s32 rapl_pmu_idx = get_rapl_pmu_idx(cpu); - if (rapl_pmu_idx < 0) { - pr_err("topology_logical_(package/die)_id() returned a negative value"); - return -EINVAL; - } - struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); - int target; - - if (!pmu) - return -ENOMEM; - - /* - * Check if there is an online cpu in the package which collects rapl - * events already. 
- */ - target = cpumask_any_and(&rapl_cpu_mask, get_rapl_pmu_cpumask(cpu)); - if (target < nr_cpu_ids) - return 0; - - cpumask_set_cpu(cpu, &rapl_cpu_mask); - pmu->cpu = cpu; - return 0; -} - static int rapl_check_hw_unit(struct rapl_model *rm) { u64 msr_rapl_power_unit_bits; @@ -725,9 +655,12 @@ free: static int __init init_rapl_pmus(void) { int nr_rapl_pmu = topology_max_packages(); + int rapl_pmu_scope = PERF_PMU_SCOPE_PKG; - if (!rapl_pmu_is_pkg_scope()) + if (!rapl_pmu_is_pkg_scope()) { nr_rapl_pmu *= topology_max_dies_per_package(); + rapl_pmu_scope = PERF_PMU_SCOPE_DIE; + } rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL); if (!rapl_pmus) @@ -743,6 +676,7 @@ static int __init init_rapl_pmus(void) rapl_pmus->pmu.start = rapl_pmu_event_start; rapl_pmus->pmu.stop = rapl_pmu_event_stop; rapl_pmus->pmu.read = rapl_pmu_event_read; + rapl_pmus->pmu.scope = rapl_pmu_scope; rapl_pmus->pmu.module = THIS_MODULE; rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; @@ -892,24 +826,13 @@ static int __init rapl_pmu_init(void) if (ret) return ret; - /* - * Install callbacks. Core will call them for each online cpu. - */ - ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE, - "perf/x86/rapl:online", - rapl_cpu_online, rapl_cpu_offline); - if (ret) - goto out; - ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1); if (ret) - goto out1; + goto out; rapl_advertise(); return 0; -out1: - cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE); out: pr_warn("Initialization failed (%d), disabled\n", ret); cleanup_rapl_pmus(); @@ -919,7 +842,6 @@ module_init(rapl_pmu_init); static void __exit intel_rapl_exit(void) { - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE); perf_pmu_unregister(&rapl_pmus->pmu); cleanup_rapl_pmus(); } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 2361ed4d2b15..37a9afffb59e 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -208,7 +208,6 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, - CPUHP_AP_PERF_X86_RAPL_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, From 2bf8e5aceff899f5117f14c73e869a61c44d8a69 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 23 Oct 2024 21:41:58 -0700 Subject: [PATCH 20/21] uprobes: allow put_uprobe() from non-sleepable softirq context Currently put_uprobe() might trigger mutex_lock()/mutex_unlock(), which makes it unsuitable to be called from more restricted contexts such as softirq. Let's make put_uprobe() agnostic to the context in which it is called, and use a work queue to defer the mutex-protected cleanup steps. The RB tree removal step is also moved into the work-deferred callback to avoid a potential deadlock between the softirq-based timer callback, added in the next patch, and the rest of the uprobe code. We can rework the locking altogether as a follow-up, but that's significantly more tricky, so it warrants its own patch set. For now, we need to make sure that the changes in the next patch, which add the timer, work correctly with the existing approach, while concentrating on the SRCU + timeout logic.
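To make the shape of that change easier to see, here is a hedged, generic sketch of the same pattern, with made-up names (struct thing, thing_lock, thing_free_deferred) standing in for the uprobe specifics: the final refcount drop may come from softirq or timer context, so the sleeping, mutex-protected teardown is bounced to a workqueue.

#include <linux/container_of.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct thing {
	refcount_t ref;
	struct work_struct work;	/* only used once ref drops to zero */
	/* ... payload ... */
};

static DEFINE_MUTEX(thing_lock);

static void thing_free_deferred(struct work_struct *work)
{
	struct thing *t = container_of(work, struct thing, work);

	mutex_lock(&thing_lock);	/* sleeping is fine in a worker */
	/* unlink t from whatever bookkeeping the mutex protects */
	mutex_unlock(&thing_lock);

	kfree(t);
}

/* Safe from softirq/timer context: no sleeping locks taken here. */
static void thing_put(struct thing *t)
{
	if (!refcount_dec_and_test(&t->ref))
		return;

	INIT_WORK(&t->work, thing_free_deferred);
	schedule_work(&t->work);
}

In the patch itself the work_struct shares a union with the rcu_head, since the two are never live at the same time.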
Signed-off-by: Andrii Nakryiko Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20241024044159.3156646-2-andrii@kernel.org --- kernel/events/uprobes.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 4ef4b51776eb..d7e489246608 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -61,7 +62,10 @@ struct uprobe { struct list_head pending_list; struct list_head consumers; struct inode *inode; /* Also hold a ref to inode */ - struct rcu_head rcu; + union { + struct rcu_head rcu; + struct work_struct work; + }; loff_t offset; loff_t ref_ctr_offset; unsigned long flags; /* "unsigned long" so bitops work */ @@ -625,10 +629,9 @@ static void uprobe_free_rcu(struct rcu_head *rcu) kfree(uprobe); } -static void put_uprobe(struct uprobe *uprobe) +static void uprobe_free_deferred(struct work_struct *work) { - if (!refcount_dec_and_test(&uprobe->ref)) - return; + struct uprobe *uprobe = container_of(work, struct uprobe, work); write_lock(&uprobes_treelock); @@ -652,6 +655,15 @@ static void put_uprobe(struct uprobe *uprobe) call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu); } +static void put_uprobe(struct uprobe *uprobe) +{ + if (!refcount_dec_and_test(&uprobe->ref)) + return; + + INIT_WORK(&uprobe->work, uprobe_free_deferred); + schedule_work(&uprobe->work); +} + static __always_inline int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset, const struct uprobe *r) From dd1a7567784e2b1f80258be04f57bcfa82c997eb Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 23 Oct 2024 21:41:59 -0700 Subject: [PATCH 21/21] uprobes: SRCU-protect uretprobe lifetime (with timeout) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid taking refcount on uprobe in prepare_uretprobe(), instead take uretprobe-specific SRCU lock and keep it active as kernel transfers control back to user space. Given we can't rely on user space returning from traced function within reasonable time period, we need to make sure not to keep SRCU lock active for too long, though. To that effect, we employ a timer callback which is meant to terminate SRCU lock region after predefined timeout (currently set to 100ms), and instead transfer underlying struct uprobe's lifetime protection to refcounting. This fallback to less scalable refcounting after 100ms is a fine tradeoff from uretprobe's scalability and performance perspective, because uretprobing *long running* user functions inherently doesn't run into scalability issues (there is just not enough frequency to cause noticeable issues with either performance or scalability). The overall trick is in ensuring synchronization between current thread and timer's callback fired on some other thread. To cope with that with minimal logic complications, we add hprobe wrapper which is used to contain all the synchronization related issues behind a small number of basic helpers: hprobe_expire() for "downgrading" uprobe from SRCU-protected state to refcounted state, and a hprobe_consume() and hprobe_finalize() pair of single-use consuming helpers. Other than that, whatever current thread's logic is there stays the same, as timer thread cannot modify return_instance state (or add new/remove old return_instances). It only takes care of SRCU unlock and uprobe refcounting, which is hidden from the higher-level uretprobe handling logic. 
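As a stripped-down sketch of that hand-off (made-up lease_* names; the real hprobe_consume()/hprobe_expire() in the diff below additionally handle refcounting and the GONE/CONSUMED states), the key point is that whichever side moves the state away from the leased value becomes responsible for the SRCU unlock:

#include <linux/atomic.h>
#include <linux/srcu.h>
#include <linux/types.h>

enum lease_state { LEASE_HELD, LEASE_STABLE, LEASE_CONSUMED };

struct lease {
	enum lease_state state;
	int srcu_idx;
};

DEFINE_STATIC_SRCU(lease_srcu);

/*
 * Current task's fast path: unconditionally claim the lease; the caller
 * then releases SRCU (or drops a refcount) based on the returned state.
 */
static enum lease_state lease_consume(struct lease *l)
{
	return xchg(&l->state, LEASE_CONSUMED);
}

/*
 * Timer path: succeeds only if the fast path has not claimed the lease
 * yet, and the winner is the one that unlocks SRCU.
 */
static bool lease_expire(struct lease *l)
{
	enum lease_state old = LEASE_HELD;

	if (!try_cmpxchg(&l->state, &old, LEASE_STABLE))
		return false;	/* lost the race; the consume side unlocks */

	__srcu_read_unlock(&lease_srcu, l->srcu_idx);
	return true;
}

Only one of the xchg()/try_cmpxchg() transitions can observe the leased state, so the SRCU read-side critical section is closed exactly once no matter which side wins.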
We use atomic xchg() in hprobe_consume(), which is called from performance critical handle_uretprobe_chain() function run in the current context. When uncontended, this xchg() doesn't seem to hurt performance as there are no other competing CPUs fighting for the same cache line. We also mark struct return_instance as ____cacheline_aligned to ensure no false sharing can happen. Another technical moment. We need to make sure that the list of return instances can be safely traversed under RCU from timer callback, so we delay return_instance freeing with kfree_rcu() and make sure that list modifications use RCU-aware operations. Also, given SRCU lock survives transition from kernel to user space and back we need to use lower-level __srcu_read_lock() and __srcu_read_unlock() to avoid lockdep complaining. Just to give an impression of a kind of performance improvements this change brings, below are benchmarking results with and without these SRCU changes, assuming other uprobe optimizations (mainly RCU Tasks Trace for entry uprobes, lockless RB-tree lookup, and lockless VMA to uprobe lookup) are left intact: WITHOUT SRCU for uretprobes =========================== uretprobe-nop ( 1 cpus): 2.197 ± 0.002M/s ( 2.197M/s/cpu) uretprobe-nop ( 2 cpus): 3.325 ± 0.001M/s ( 1.662M/s/cpu) uretprobe-nop ( 3 cpus): 4.129 ± 0.002M/s ( 1.376M/s/cpu) uretprobe-nop ( 4 cpus): 6.180 ± 0.003M/s ( 1.545M/s/cpu) uretprobe-nop ( 8 cpus): 7.323 ± 0.005M/s ( 0.915M/s/cpu) uretprobe-nop (16 cpus): 6.943 ± 0.005M/s ( 0.434M/s/cpu) uretprobe-nop (32 cpus): 5.931 ± 0.014M/s ( 0.185M/s/cpu) uretprobe-nop (64 cpus): 5.145 ± 0.003M/s ( 0.080M/s/cpu) uretprobe-nop (80 cpus): 4.925 ± 0.005M/s ( 0.062M/s/cpu) WITH SRCU for uretprobes ======================== uretprobe-nop ( 1 cpus): 1.968 ± 0.001M/s ( 1.968M/s/cpu) uretprobe-nop ( 2 cpus): 3.739 ± 0.003M/s ( 1.869M/s/cpu) uretprobe-nop ( 3 cpus): 5.616 ± 0.003M/s ( 1.872M/s/cpu) uretprobe-nop ( 4 cpus): 7.286 ± 0.002M/s ( 1.822M/s/cpu) uretprobe-nop ( 8 cpus): 13.657 ± 0.007M/s ( 1.707M/s/cpu) uretprobe-nop (32 cpus): 45.305 ± 0.066M/s ( 1.416M/s/cpu) uretprobe-nop (64 cpus): 42.390 ± 0.922M/s ( 0.662M/s/cpu) uretprobe-nop (80 cpus): 47.554 ± 2.411M/s ( 0.594M/s/cpu) Signed-off-by: Andrii Nakryiko Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20241024044159.3156646-3-andrii@kernel.org --- include/linux/uprobes.h | 54 +++++++- kernel/events/uprobes.c | 291 +++++++++++++++++++++++++++++++++++----- 2 files changed, 306 insertions(+), 39 deletions(-) diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index dbaf04189548..7a051b5d2edd 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -15,6 +15,7 @@ #include #include #include +#include struct uprobe; struct vm_area_struct; @@ -67,6 +68,53 @@ enum uprobe_task_state { UTASK_SSTEP_TRAPPED, }; +/* The state of hybrid-lifetime uprobe inside struct return_instance */ +enum hprobe_state { + HPROBE_LEASED, /* uretprobes_srcu-protected uprobe */ + HPROBE_STABLE, /* refcounted uprobe */ + HPROBE_GONE, /* NULL uprobe, SRCU expired, refcount failed */ + HPROBE_CONSUMED, /* uprobe "consumed" by uretprobe handler */ +}; + +/* + * Hybrid lifetime uprobe. Represents a uprobe instance that could be either + * SRCU protected (with SRCU protection eventually potentially timing out), + * refcounted using uprobe->ref, or there could be no valid uprobe (NULL). 
+ * + * hprobe's internal state is setup such that background timer thread can + * atomically "downgrade" temporarily RCU-protected uprobe into refcounted one + * (or no uprobe, if refcounting failed). + * + * *stable* pointer always point to the uprobe (or could be NULL if there is + * was no valid underlying uprobe to begin with). + * + * *leased* pointer is the key to achieving race-free atomic lifetime state + * transition and can have three possible states: + * - either the same non-NULL value as *stable*, in which case uprobe is + * SRCU-protected; + * - NULL, in which case uprobe (if there is any) is refcounted; + * - special __UPROBE_DEAD value, which represents an uprobe that was SRCU + * protected initially, but SRCU period timed out and we attempted to + * convert it to refcounted, but refcount_inc_not_zero() failed, because + * uprobe effectively went away (the last consumer unsubscribed). In this + * case it's important to know that *stable* pointer (which still has + * non-NULL uprobe pointer) shouldn't be used, because lifetime of + * underlying uprobe is not guaranteed anymore. __UPROBE_DEAD is just an + * internal marker and is handled transparently by hprobe_fetch() helper. + * + * When uprobe is SRCU-protected, we also record srcu_idx value, necessary for + * SRCU unlocking. + * + * See hprobe_expire() and hprobe_fetch() for details of race-free uprobe + * state transitioning details. It all hinges on atomic xchg() over *leaded* + * pointer. *stable* pointer, once initially set, is not modified concurrently. + */ +struct hprobe { + enum hprobe_state state; + int srcu_idx; + struct uprobe *uprobe; +}; + /* * uprobe_task: Metadata of a task while it singlesteps. */ @@ -86,6 +134,7 @@ struct uprobe_task { }; struct uprobe *active_uprobe; + struct timer_list ri_timer; unsigned long xol_vaddr; struct arch_uprobe *auprobe; @@ -100,7 +149,7 @@ struct return_consumer { }; struct return_instance { - struct uprobe *uprobe; + struct hprobe hprobe; unsigned long func; unsigned long stack; /* stack pointer */ unsigned long orig_ret_vaddr; /* original return address */ @@ -108,9 +157,10 @@ struct return_instance { int consumers_cnt; struct return_instance *next; /* keep as stack */ + struct rcu_head rcu; struct return_consumer consumers[] __counted_by(consumers_cnt); -}; +} ____cacheline_aligned; enum rp_check { RP_CHECK_CALL, diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index d7e489246608..a76ddc5fc982 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -51,6 +52,9 @@ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ]; DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem); +/* Covers return_instance's uprobe lifetime. 
*/ +DEFINE_STATIC_SRCU(uretprobes_srcu); + /* Have a copy of original instruction */ #define UPROBE_COPY_INSN 0 @@ -622,13 +626,20 @@ static inline bool uprobe_is_active(struct uprobe *uprobe) return !RB_EMPTY_NODE(&uprobe->rb_node); } -static void uprobe_free_rcu(struct rcu_head *rcu) +static void uprobe_free_rcu_tasks_trace(struct rcu_head *rcu) { struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu); kfree(uprobe); } +static void uprobe_free_srcu(struct rcu_head *rcu) +{ + struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu); + + call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu_tasks_trace); +} + static void uprobe_free_deferred(struct work_struct *work) { struct uprobe *uprobe = container_of(work, struct uprobe, work); @@ -652,7 +663,8 @@ static void uprobe_free_deferred(struct work_struct *work) delayed_uprobe_remove(uprobe, NULL); mutex_unlock(&delayed_uprobe_lock); - call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu); + /* start srcu -> rcu_tasks_trace -> kfree chain */ + call_srcu(&uretprobes_srcu, &uprobe->rcu, uprobe_free_srcu); } static void put_uprobe(struct uprobe *uprobe) @@ -664,6 +676,151 @@ static void put_uprobe(struct uprobe *uprobe) schedule_work(&uprobe->work); } +/* Initialize hprobe as SRCU-protected "leased" uprobe */ +static void hprobe_init_leased(struct hprobe *hprobe, struct uprobe *uprobe, int srcu_idx) +{ + WARN_ON(!uprobe); + hprobe->state = HPROBE_LEASED; + hprobe->uprobe = uprobe; + hprobe->srcu_idx = srcu_idx; +} + +/* Initialize hprobe as refcounted ("stable") uprobe (uprobe can be NULL). */ +static void hprobe_init_stable(struct hprobe *hprobe, struct uprobe *uprobe) +{ + hprobe->state = uprobe ? HPROBE_STABLE : HPROBE_GONE; + hprobe->uprobe = uprobe; + hprobe->srcu_idx = -1; +} + +/* + * hprobe_consume() fetches hprobe's underlying uprobe and detects whether + * uprobe is SRCU protected or is refcounted. hprobe_consume() can be + * used only once for a given hprobe. + * + * Caller has to call hprobe_finalize() and pass previous hprobe_state, so + * that hprobe_finalize() can perform SRCU unlock or put uprobe, whichever + * is appropriate. + */ +static inline struct uprobe *hprobe_consume(struct hprobe *hprobe, enum hprobe_state *hstate) +{ + *hstate = xchg(&hprobe->state, HPROBE_CONSUMED); + switch (*hstate) { + case HPROBE_LEASED: + case HPROBE_STABLE: + return hprobe->uprobe; + case HPROBE_GONE: /* uprobe is NULL, no SRCU */ + case HPROBE_CONSUMED: /* uprobe was finalized already, do nothing */ + return NULL; + default: + WARN(1, "hprobe invalid state %d", *hstate); + return NULL; + } +} + +/* + * Reset hprobe state and, if hprobe was LEASED, release SRCU lock. + * hprobe_finalize() can only be used from current context after + * hprobe_consume() call (which determines uprobe and hstate value). + */ +static void hprobe_finalize(struct hprobe *hprobe, enum hprobe_state hstate) +{ + switch (hstate) { + case HPROBE_LEASED: + __srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx); + break; + case HPROBE_STABLE: + put_uprobe(hprobe->uprobe); + break; + case HPROBE_GONE: + case HPROBE_CONSUMED: + break; + default: + WARN(1, "hprobe invalid state %d", hstate); + break; + } +} + +/* + * Attempt to switch (atomically) uprobe from being SRCU protected (LEASED) + * to refcounted (STABLE) state. Competes with hprobe_consume(); only one of + * them can win the race to perform SRCU unlocking. Whoever wins must perform + * SRCU unlock. 
+ * + * Returns underlying valid uprobe or NULL, if there was no underlying uprobe + * to begin with or we failed to bump its refcount and it's going away. + * + * Returned non-NULL uprobe can be still safely used within an ongoing SRCU + * locked region. If `get` is true, it's guaranteed that non-NULL uprobe has + * an extra refcount for caller to assume and use. Otherwise, it's not + * guaranteed that returned uprobe has a positive refcount, so caller has to + * attempt try_get_uprobe(), if it needs to preserve uprobe beyond current + * SRCU lock region. See dup_utask(). + */ +static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get) +{ + enum hprobe_state hstate; + + /* + * return_instance's hprobe is protected by RCU. + * Underlying uprobe is itself protected from reuse by SRCU. + */ + lockdep_assert(rcu_read_lock_held() && srcu_read_lock_held(&uretprobes_srcu)); + + hstate = READ_ONCE(hprobe->state); + switch (hstate) { + case HPROBE_STABLE: + /* uprobe has positive refcount, bump refcount, if necessary */ + return get ? get_uprobe(hprobe->uprobe) : hprobe->uprobe; + case HPROBE_GONE: + /* + * SRCU was unlocked earlier and we didn't manage to take + * uprobe refcnt, so it's effectively NULL + */ + return NULL; + case HPROBE_CONSUMED: + /* + * uprobe was consumed, so it's effectively NULL as far as + * uretprobe processing logic is concerned + */ + return NULL; + case HPROBE_LEASED: { + struct uprobe *uprobe = try_get_uprobe(hprobe->uprobe); + /* + * Try to switch hprobe state, guarding against + * hprobe_consume() or another hprobe_expire() racing with us. + * Note, if we failed to get uprobe refcount, we use special + * HPROBE_GONE state to signal that hprobe->uprobe shouldn't + * be used as it will be freed after SRCU is unlocked. + */ + if (try_cmpxchg(&hprobe->state, &hstate, uprobe ? HPROBE_STABLE : HPROBE_GONE)) { + /* We won the race, we are the ones to unlock SRCU */ + __srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx); + return get ? get_uprobe(uprobe) : uprobe; + } + + /* + * We lost the race, undo refcount bump (if it ever happened), + * unless caller would like an extra refcount anyways. + */ + if (uprobe && !get) + put_uprobe(uprobe); + /* + * Even if hprobe_consume() or another hprobe_expire() wins + * the state update race and unlocks SRCU from under us, we + * still have a guarantee that underyling uprobe won't be + * freed due to ongoing caller's SRCU lock region, so we can + * return it regardless. Also, if `get` was true, we also have + * an extra ref for the caller to own. This is used in dup_utask(). + */ + return uprobe; + } + default: + WARN(1, "unknown hprobe state %d", hstate); + return NULL; + } +} + static __always_inline int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset, const struct uprobe *r) @@ -1169,6 +1326,7 @@ void uprobe_unregister_sync(void) * handler_chain() or handle_uretprobe_chain() to do an use-after-free. 
*/ synchronize_rcu_tasks_trace(); + synchronize_srcu(&uretprobes_srcu); } EXPORT_SYMBOL_GPL(uprobe_unregister_sync); @@ -1731,11 +1889,18 @@ unsigned long uprobe_get_trap_addr(struct pt_regs *regs) return instruction_pointer(regs); } -static struct return_instance *free_ret_instance(struct return_instance *ri) +static struct return_instance *free_ret_instance(struct return_instance *ri, bool cleanup_hprobe) { struct return_instance *next = ri->next; - put_uprobe(ri->uprobe); - kfree(ri); + + if (cleanup_hprobe) { + enum hprobe_state hstate; + + (void)hprobe_consume(&ri->hprobe, &hstate); + hprobe_finalize(&ri->hprobe, hstate); + } + + kfree_rcu(ri, rcu); return next; } @@ -1753,14 +1918,48 @@ void uprobe_free_utask(struct task_struct *t) WARN_ON_ONCE(utask->active_uprobe || utask->xol_vaddr); + timer_delete_sync(&utask->ri_timer); + ri = utask->return_instances; while (ri) - ri = free_ret_instance(ri); + ri = free_ret_instance(ri, true /* cleanup_hprobe */); kfree(utask); t->utask = NULL; } +#define RI_TIMER_PERIOD (HZ / 10) /* 100 ms */ + +#define for_each_ret_instance_rcu(pos, head) \ + for (pos = rcu_dereference_raw(head); pos; pos = rcu_dereference_raw(pos->next)) + +static void ri_timer(struct timer_list *timer) +{ + struct uprobe_task *utask = container_of(timer, struct uprobe_task, ri_timer); + struct return_instance *ri; + + /* SRCU protects uprobe from reuse for the cmpxchg() inside hprobe_expire(). */ + guard(srcu)(&uretprobes_srcu); + /* RCU protects return_instance from freeing. */ + guard(rcu)(); + + for_each_ret_instance_rcu(ri, utask->return_instances) + hprobe_expire(&ri->hprobe, false); +} + +static struct uprobe_task *alloc_utask(void) +{ + struct uprobe_task *utask; + + utask = kzalloc(sizeof(*utask), GFP_KERNEL); + if (!utask) + return NULL; + + timer_setup(&utask->ri_timer, ri_timer, 0); + + return utask; +} + /* * Allocate a uprobe_task object for the task if necessary. * Called when the thread hits a breakpoint. @@ -1772,7 +1971,7 @@ void uprobe_free_utask(struct task_struct *t) static struct uprobe_task *get_utask(void) { if (!current->utask) - current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); + current->utask = alloc_utask(); return current->utask; } @@ -1808,29 +2007,37 @@ static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) { struct uprobe_task *n_utask; struct return_instance **p, *o, *n; + struct uprobe *uprobe; - n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL); + n_utask = alloc_utask(); if (!n_utask) return -ENOMEM; t->utask = n_utask; + /* protect uprobes from freeing, we'll need try_get_uprobe() them */ + guard(srcu)(&uretprobes_srcu); + p = &n_utask->return_instances; for (o = o_utask->return_instances; o; o = o->next) { n = dup_return_instance(o); if (!n) return -ENOMEM; - /* - * uprobe's refcnt has to be positive at this point, kept by - * utask->return_instances items; return_instances can't be - * removed right now, as task is blocked due to duping; so - * get_uprobe() is safe to use here. - */ - get_uprobe(n->uprobe); - n->next = NULL; + /* if uprobe is non-NULL, we'll have an extra refcount for uprobe */ + uprobe = hprobe_expire(&o->hprobe, true); - *p = n; + /* + * New utask will have stable properly refcounted uprobe or + * NULL. Even if we failed to get refcounted uprobe, we still + * need to preserve full set of return_instances for proper + * uretprobe handling and nesting in forked task. 
+ */ + hprobe_init_stable(&n->hprobe, uprobe); + + n->next = NULL; + rcu_assign_pointer(*p, n); p = &n->next; + n_utask->depth++; } @@ -1906,10 +2113,10 @@ static void cleanup_return_instances(struct uprobe_task *utask, bool chained, enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL; while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) { - ri = free_ret_instance(ri); + ri = free_ret_instance(ri, true /* cleanup_hprobe */); utask->depth--; } - utask->return_instances = ri; + rcu_assign_pointer(utask->return_instances, ri); } static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, @@ -1918,6 +2125,7 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, struct uprobe_task *utask = current->utask; unsigned long orig_ret_vaddr, trampoline_vaddr; bool chained; + int srcu_idx; if (!get_xol_area()) goto free; @@ -1929,14 +2137,10 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, goto free; } - /* we need to bump refcount to store uprobe in utask */ - if (!try_get_uprobe(uprobe)) - goto free; - trampoline_vaddr = uprobe_get_trampoline_vaddr(); orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs); if (orig_ret_vaddr == -1) - goto put; + goto free; /* drop the entries invalidated by longjmp() */ chained = (orig_ret_vaddr == trampoline_vaddr); @@ -1954,23 +2158,28 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs, * attack from user-space. */ uprobe_warn(current, "handle tail call"); - goto put; + goto free; } orig_ret_vaddr = utask->return_instances->orig_ret_vaddr; } - ri->uprobe = uprobe; + + /* __srcu_read_lock() because SRCU lock survives switch to user space */ + srcu_idx = __srcu_read_lock(&uretprobes_srcu); + ri->func = instruction_pointer(regs); ri->stack = user_stack_pointer(regs); ri->orig_ret_vaddr = orig_ret_vaddr; ri->chained = chained; utask->depth++; + + hprobe_init_leased(&ri->hprobe, uprobe, srcu_idx); ri->next = utask->return_instances; - utask->return_instances = ri; + rcu_assign_pointer(utask->return_instances, ri); + + mod_timer(&utask->ri_timer, jiffies + RI_TIMER_PERIOD); return; -put: - put_uprobe(uprobe); free: kfree(ri); } @@ -2215,13 +2424,16 @@ static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs) } static void -handle_uretprobe_chain(struct return_instance *ri, struct pt_regs *regs) +handle_uretprobe_chain(struct return_instance *ri, struct uprobe *uprobe, struct pt_regs *regs) { - struct uprobe *uprobe = ri->uprobe; struct return_consumer *ric; struct uprobe_consumer *uc; int ric_idx = 0; + /* all consumers unsubscribed meanwhile */ + if (unlikely(!uprobe)) + return; + rcu_read_lock_trace(); list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) { bool session = uc->handler && uc->ret_handler; @@ -2251,6 +2463,8 @@ void uprobe_handle_trampoline(struct pt_regs *regs) { struct uprobe_task *utask; struct return_instance *ri, *next; + struct uprobe *uprobe; + enum hprobe_state hstate; bool valid; utask = current->utask; @@ -2281,21 +2495,24 @@ void uprobe_handle_trampoline(struct pt_regs *regs) * trampoline addresses on the stack are replaced with correct * original return addresses */ - utask->return_instances = ri->next; + rcu_assign_pointer(utask->return_instances, ri->next); + + uprobe = hprobe_consume(&ri->hprobe, &hstate); if (valid) - handle_uretprobe_chain(ri, regs); - ri = free_ret_instance(ri); + handle_uretprobe_chain(ri, uprobe, regs); + hprobe_finalize(&ri->hprobe, hstate); + + 
/* We already took care of hprobe, no need to waste more time on that. */ + ri = free_ret_instance(ri, false /* !cleanup_hprobe */); utask->depth--; } while (ri != next); } while (!valid); - utask->return_instances = ri; return; - sigill: +sigill: uprobe_warn(current, "handle uretprobe, sending SIGILL."); force_sig(SIGILL); - } bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)