Merge branch 'for-6.14' into for-next

commit 37e6cc1c0c
Author: Tejun Heo
Date:   2024-12-29 12:45:19 -10:00


@@ -3180,6 +3180,10 @@ static bool test_and_clear_cpu_idle(int cpu)
 	 * scx_pick_idle_cpu() can get caught in an infinite loop as
 	 * @cpu is never cleared from idle_masks.smt. Ensure that @cpu
 	 * is eventually cleared.
+	 *
+	 * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to
+	 * reduce memory writes, which may help alleviate cache
+	 * coherence pressure.
 	 */
 	if (cpumask_intersects(smt, idle_masks.smt))
 		cpumask_andnot(idle_masks.smt, idle_masks.smt, smt);
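
The NOTE above relies on a common cache-friendly pattern: read the shared
cpumask first and issue the read-modify-write only when it would actually
change state, so the cache line can stay in shared state across CPUs on the
miss path. A minimal userspace sketch of the idea (the single-word mask and
the function name are illustrative, not the kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	static _Atomic unsigned long idle_mask;	/* one idle bit per CPU, CPUs 0-63 */

	/* Clear @cpu's idle bit, checking with a plain load first so a
	 * miss does not dirty the cache line holding the mask. */
	static bool clear_idle_bit(int cpu)
	{
		unsigned long bit = 1UL << cpu;

		if (!(atomic_load_explicit(&idle_mask, memory_order_relaxed) & bit))
			return false;	/* already clear: no write issued */

		return atomic_fetch_and_explicit(&idle_mask, ~bit,
						 memory_order_relaxed) & bit;
	}
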
@@ -3408,6 +3412,8 @@ static void update_selcpu_topology(void)
  * 4. Pick a CPU within the same NUMA node, if enabled:
  *   - choose a CPU from the same NUMA node to reduce memory access latency.
  *
+ * 5. Pick any idle CPU usable by the task.
+ *
  * Step 3 and 4 are performed only if the system has, respectively, multiple
  * LLC domains / multiple NUMA nodes (see scx_selcpu_topo_llc and
  * scx_selcpu_topo_numa).
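
Read together, the documented order is a chain of progressively wider
idle-CPU searches. A condensed sketch of that chain follows; the helper
names and signatures are hypothetical stand-ins for the real lookups, with
steps 1-2 elided:

	static s32 pick_idle_cpu_sketch(const struct task_struct *p, s32 prev_cpu)
	{
		s32 cpu;

		/* steps 1-2: SMT-core and prev_cpu heuristics (elided) */

		if (scx_selcpu_topo_llc && (cpu = pick_idle_in_llc(p)) >= 0)
			return cpu;			/* step 3: same LLC */
		if (scx_selcpu_topo_numa && (cpu = pick_idle_in_node(p)) >= 0)
			return cpu;			/* step 4: same NUMA node */
		return pick_any_idle_cpu(p);		/* step 5: any usable idle CPU */
	}
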
@@ -3654,10 +3660,7 @@ void __scx_update_idle(struct rq *rq, bool idle)
 		return;
 	}
 
-	if (idle)
-		cpumask_set_cpu(cpu, idle_masks.cpu);
-	else
-		cpumask_clear_cpu(cpu, idle_masks.cpu);
+	assign_cpu(cpu, idle_masks.cpu, idle);
 
 #ifdef CONFIG_SCHED_SMT
 	if (sched_smt_active()) {
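
assign_cpu() collapses the set/clear branch above into a single call at the
call site. A minimal stand-in showing what such a helper boils down to,
assuming a flat bitmap (the in-tree helper is built on assign_bit(); this
sketch is illustrative, not the kernel definition):

	#define BITS_PER_LONG	(8 * sizeof(unsigned long))

	/* Set or clear @cpu's bit in @mask depending on @value. */
	static inline void assign_cpu_sketch(unsigned int cpu,
					     unsigned long *mask, bool value)
	{
		unsigned long bit = 1UL << (cpu % BITS_PER_LONG);

		if (value)
			mask[cpu / BITS_PER_LONG] |= bit;
		else
			mask[cpu / BITS_PER_LONG] &= ~bit;
	}
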
@@ -6294,6 +6297,15 @@ void __init init_sched_ext_class(void)
 
 __bpf_kfunc_start_defs();
 
+static bool check_builtin_idle_enabled(void)
+{
+	if (static_branch_likely(&scx_builtin_idle_enabled))
+		return true;
+
+	scx_ops_error("built-in idle tracking is disabled");
+	return false;
+}
+
 /**
  * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
  * @p: task_struct to select a CPU for
@@ -6311,10 +6323,8 @@ __bpf_kfunc_start_defs();
 __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
 				       u64 wake_flags, bool *is_idle)
 {
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		goto prev_cpu;
-	}
 
 	if (!scx_kf_allowed(SCX_KF_SELECT_CPU))
 		goto prev_cpu;
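
Each remaining call site below reduces to the same two-line guard and
differs only in its failure value:

	if (!check_builtin_idle_enabled())
		return cpu_none_mask;	/* or false, -EBUSY, goto prev_cpu */
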
@@ -7408,10 +7418,8 @@ __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask)
  */
 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
 {
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		return cpu_none_mask;
-	}
 
 #ifdef CONFIG_SMP
 	return idle_masks.cpu;
@@ -7429,10 +7437,8 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
  */
 __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
 {
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		return cpu_none_mask;
-	}
 
 #ifdef CONFIG_SMP
 	if (sched_smt_active())
@@ -7470,10 +7476,8 @@ __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
  */
 __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
 {
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		return false;
-	}
 
 	if (ops_cpu_valid(cpu, NULL))
 		return test_and_clear_cpu_idle(cpu);
@@ -7503,10 +7507,8 @@ __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu)
 __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed,
 				      u64 flags)
 {
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
+	if (!check_builtin_idle_enabled())
 		return -EBUSY;
-	}
 
 	return scx_pick_idle_cpu(cpus_allowed, flags);
 }