From 9cf9aceed21e3f08c94108bd688e812effce4423 Mon Sep 17 00:00:00 2001
From: Andrea Righi
Date: Sat, 28 Dec 2024 11:02:50 +0100
Subject: [PATCH 1/3] sched_ext: idle: use assign_cpu() to update the idle
 cpumask

Use the assign_cpu() helper to set or clear the CPU in the idle cpumask
based on the idle condition.

Acked-by: Yury Norov
Signed-off-by: Andrea Righi
Signed-off-by: Tejun Heo
---
 kernel/sched/ext.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 7b229a4fb083..eec4716de225 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -3654,10 +3654,7 @@ void __scx_update_idle(struct rq *rq, bool idle)
 		return;
 	}
 
-	if (idle)
-		cpumask_set_cpu(cpu, idle_masks.cpu);
-	else
-		cpumask_clear_cpu(cpu, idle_masks.cpu);
+	assign_cpu(cpu, idle_masks.cpu, idle);
 
 #ifdef CONFIG_SCHED_SMT
 	if (sched_smt_active()) {

From 02f034dcbf3dcb0989e638fdc00d10984dc2278b Mon Sep 17 00:00:00 2001
From: Andrea Righi
Date: Sat, 28 Dec 2024 11:04:11 +0100
Subject: [PATCH 2/3] sched_ext: idle: clarify comments

Add a comment to clarify the usage of cpumask_intersects().

Moreover, update the scx_select_cpu_dfl() description, clarifying that
the final step of the idle selection logic involves searching for any
idle CPU in the system that the task can use.

Reviewed-by: Yury Norov
Signed-off-by: Andrea Righi
Signed-off-by: Tejun Heo
---
 kernel/sched/ext.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index eec4716de225..2d701203a3db 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -3180,6 +3180,10 @@ static bool test_and_clear_cpu_idle(int cpu)
 		 * scx_pick_idle_cpu() can get caught in an infinite loop as
 		 * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
 		 * is eventually cleared.
+		 *
+		 * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to
+		 * reduce memory writes, which may help alleviate cache
+		 * coherence pressure.
 		 */
 		if (cpumask_intersects(smt, idle_masks.smt))
 			cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
@@ -3408,6 +3412,8 @@ static void update_selcpu_topology(void)
  * 4. Pick a CPU within the same NUMA node, if enabled:
  *   - choose a CPU from the same NUMA node to reduce memory access latency.
  *
+ * 5. Pick any idle CPU usable by the task.
+ *
  * Step 3 and 4 are performed only if the system has, respectively, multiple
  * LLC domains / multiple NUMA nodes (see scx_selcpu_topo_llc and
  * scx_selcpu_topo_numa).
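
Note: for readers unfamiliar with the helper used in patch 1, assign_cpu()
belongs to the cpumask API (include/linux/cpumask.h). A minimal sketch of
its semantics follows; this is not the actual kernel implementation, which
is a thin wrapper around the generic bit helpers:

	/*
	 * Sketch of assign_cpu() semantics: set @cpu in @mask when @value
	 * is true, clear it otherwise. This is exactly the branch that
	 * patch 1 collapses into a single call in __scx_update_idle().
	 */
	static inline void assign_cpu(unsigned int cpu, struct cpumask *mask,
				      bool value)
	{
		if (value)
			cpumask_set_cpu(cpu, mask);
		else
			cpumask_clear_cpu(cpu, mask);
	}

The cpumask_intersects() check documented by patch 2 follows a similar
spirit: read the shared mask first, and only issue the cache-line dirtying
cpumask_andnot() write when there is actually a bit to clear.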
From c0cf3530098bc13578bd4d0692351a2c4a57425d Mon Sep 17 00:00:00 2001
From: Andrea Righi
Date: Sat, 28 Dec 2024 11:06:33 +0100
Subject: [PATCH 3/3] sched_ext: idle: introduce check_builtin_idle_enabled()
 helper

Minor refactoring to add a helper function for checking if the built-in
idle CPU selection policy is enabled.

Signed-off-by: Andrea Righi
Signed-off-by: Tejun Heo
---
 kernel/sched/ext.c | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 2d701203a3db..926579624c41 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -6297,6 +6297,15 @@ void __init init_sched_ext_class(void)
 
 __bpf_kfunc_start_defs();
 
+static bool check_builtin_idle_enabled(void)
+{
+	if (static_branch_likely(&scx_builtin_idle_enabled))
+		return true;
+
+	scx_ops_error("built-in idle tracking is disabled");
+	return false;
+}
+
 /**
  * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
  * @p: task_struct to select a CPU for
@@ -6314,10 +6323,8 @@ __bpf_kfunc_start_defs();
 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
 				       u64 wake_flags, bool *is_idle)
 {
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		goto prev_cpu;
-	}
 
 	if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
 		goto prev_cpu;
@@ -7411,10 +7418,8 @@ __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
  */
 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
 {
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		return cpu_none_mask;
-	}
 
 #ifdef CONFIG_SMP
 	return idle_masks.cpu;
@@ -7432,10 +7437,8 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
  */
 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
 {
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		return cpu_none_mask;
-	}
 
 #ifdef CONFIG_SMP
 	if (sched_smt_active())
@@ -7473,10 +7476,8 @@ __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
  */
 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
 {
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		return false;
-	}
 
 	if (ops_cpu_valid(cpu, NULL))
 		return test_and_clear_cpu_idle(cpu);
@@ -7506,10 +7507,8 @@ __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
 __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
 				      u64 flags)
 {
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		return -EBUSY;
-	}
 
 	return scx_pick_idle_cpu(cpus_allowed, flags);
 }
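
Note: for context on where the kfuncs refactored by patch 3 are called
from, below is a sketch of a minimal BPF scheduler's ops.select_cpu()
built on scx_bpf_select_cpu_dfl(), similar in spirit to
tools/sched_ext/scx_simple.bpf.c. The my_select_cpu name is illustrative,
and the dispatch kfunc name may differ between kernel versions:

	s32 BPF_STRUCT_OPS(my_select_cpu, struct task_struct *p,
			   s32 prev_cpu, u64 wake_flags)
	{
		bool is_idle = false;
		s32 cpu;

		/*
		 * Delegate to the built-in idle CPU selection. If built-in
		 * idle tracking is disabled (e.g. the scheduler implements
		 * ops.update_idle() without SCX_OPS_KEEP_BUILTIN_IDLE),
		 * check_builtin_idle_enabled() fails, an ops error is
		 * raised and prev_cpu is returned.
		 */
		cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
		if (is_idle)
			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

		return cpu;
	}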