sched_ext: Fixes for v6.13-rc5
- Fix the bug where bpf_iter_scx_dsq_new() was not initializing the
  iterator's flags and could inadvertently enable e.g. reverse iteration.

- Fix the bug where scx_ops_bypass() could call irq_restore twice.

- Add Andrea and Changwoo as maintainers for better review coverage.

- selftests and tools/sched_ext build and other fixes.

-----BEGIN PGP SIGNATURE-----

iIQEABYKACwWIQTfIjM1kS57o3GsC/uxYfJx3gVYGQUCZ3hpXg4cdGpAa2VybmVs
Lm9yZwAKCRCxYfJx3gVYGS/lAQDOZDfcJtO1VEsLoPY9NhFHPuBDTfoJyjSi/4mh
GsjgDAD/Sx0rN6C9S/+ToUjmq3FA+ft0m2+97VqgLwkzwA9YxwI=
=jaZ6
-----END PGP SIGNATURE-----

Merge tag 'sched_ext-for-6.13-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext

Pull sched_ext fixes from Tejun Heo:

 - Fix a bug where bpf_iter_scx_dsq_new() was not initializing the
   iterator's flags and could inadvertently enable e.g. reverse iteration

 - Fix a bug where scx_ops_bypass() could call irq_restore twice

 - Add Andrea and Changwoo as maintainers for better review coverage

 - selftests and tools/sched_ext build and other fixes

* tag 'sched_ext-for-6.13-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext:
  sched_ext: Fix dsq_local_on selftest
  sched_ext: initialize kit->cursor.flags
  sched_ext: Fix invalid irq restore in scx_ops_bypass()
  MAINTAINERS: add me as reviewer for sched_ext
  MAINTAINERS: add self as reviewer for sched_ext
  scx: Fix maximal BPF selftest prog
  sched_ext: fix application of sizeof to pointer
  selftests/sched_ext: fix build after renames in sched_ext API
  sched_ext: Add __weak to fix the build errors
commit 63676eefb7
@@ -20907,6 +20907,8 @@ F: kernel/sched/
 SCHEDULER - SCHED_EXT
 R: Tejun Heo <tj@kernel.org>
 R: David Vernet <void@manifault.com>
+R: Andrea Righi <arighi@nvidia.com>
+R: Changwoo Min <changwoo@igalia.com>
 L: linux-kernel@vger.kernel.org
 S: Maintained
 W: https://github.com/sched-ext/scx
@@ -4763,7 +4763,7 @@ static void scx_ops_bypass(bool bypass)
                 * sees scx_rq_bypassing() before moving tasks to SCX.
                 */
                if (!scx_enabled()) {
-                       rq_unlock_irqrestore(rq, &rf);
+                       rq_unlock(rq, &rf);
                        continue;
                }

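The hunk above restores the rule that only the outermost irqsave/irqrestore pair manages the IRQ state: an inner unlock taken while an outer section has already saved and disabled IRQs must not restore them. A minimal user-space analogue of why a premature inner restore is wrong (illustrative only; the fake_irq_* helpers below are stand-ins, not the kernel primitives):

#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;            /* stand-in for the CPU IRQ flag */

static void fake_irq_save(bool *flags) { *flags = irqs_enabled; irqs_enabled = false; }
static void fake_irq_restore(bool flags) { irqs_enabled = flags; }

int main(void)
{
    bool outer, inner;

    fake_irq_save(&outer);          /* outer section: IRQs now off */

    fake_irq_save(&inner);          /* nested lock inside the outer section */
    /*
     * Buggy pattern: restoring here with a state saved while IRQs were still
     * on re-enables them while the outer section is still running, and the
     * outer restore then runs a second time. A plain unlock that leaves the
     * IRQ state untouched, as in the fix above, avoids both problems.
     */
    fake_irq_restore(inner);        /* correct: IRQs stay off */

    printf("inside outer section: enabled=%d (expect 0)\n", irqs_enabled);

    fake_irq_restore(outer);        /* only the outermost restore re-enables */
    printf("after outer section:  enabled=%d (expect 1)\n", irqs_enabled);
    return 0;
}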
@@ -7013,7 +7013,7 @@ __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
                return -ENOENT;

        INIT_LIST_HEAD(&kit->cursor.node);
-       kit->cursor.flags |= SCX_DSQ_LNODE_ITER_CURSOR | flags;
+       kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
        kit->cursor.priv = READ_ONCE(kit->dsq->seq);

        return 0;
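The one-character change above matters because the cursor field is not guaranteed to start out zeroed; OR-ing flags into it can preserve stale bits, such as a reverse-iteration flag, from whatever was in that memory before. A standalone user-space sketch of the same pitfall (the flag names are illustrative stand-ins, not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ITER_CURSOR 0x1u    /* stand-in for SCX_DSQ_LNODE_ITER_CURSOR */
#define ITER_REV    0x2u    /* stand-in for an optional reverse-iteration flag */

struct cursor { uint32_t flags; };

int main(void)
{
    struct cursor *c = malloc(sizeof(*c));

    memset(c, 0xff, sizeof(*c));    /* simulate stale data in uninitialized memory */

    c->flags |= ITER_CURSOR;        /* buggy: stale bits survive the OR */
    printf("|=  flags=%#x, reverse leaked: %d\n", c->flags, !!(c->flags & ITER_REV));

    c->flags = ITER_CURSOR;         /* fixed: exactly the intended bits */
    printf("=   flags=%#x, reverse leaked: %d\n", c->flags, !!(c->flags & ITER_REV));

    free(c);
    return 0;
}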
@@ -40,9 +40,9 @@ void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_fl
 void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
 u32 scx_bpf_dispatch_nr_slots(void) __ksym;
 void scx_bpf_dispatch_cancel(void) __ksym;
-bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym;
-void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym;
-void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym;
+bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym __weak;
+void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
+void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
 bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
 bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
 u32 scx_bpf_reenqueue_local(void) __ksym;
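Marking the declarations __weak lets programs that reference these kfuncs still build and load when a given symbol is not available, instead of failing outright. A rough user-space analogue using GCC/Clang weak symbols (illustrative only; BPF's __ksym __weak resolution is handled by libbpf and the verifier rather than the ELF linker):

#include <stdio.h>

/* With no definition linked in, a weak declaration resolves to a null address
 * instead of producing a link error, so availability can be probed at runtime. */
__attribute__((weak)) void optional_feature(void);

int main(void)
{
    if (optional_feature)
        optional_feature();     /* a definition was linked in */
    else
        printf("optional_feature unavailable, using a fallback\n");
    return 0;
}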
@@ -97,7 +97,7 @@ restart:
        SCX_BUG_ON(!cpuset, "Failed to allocate cpuset");
        CPU_ZERO(cpuset);
        CPU_SET(skel->rodata->central_cpu, cpuset);
-       SCX_BUG_ON(sched_setaffinity(0, sizeof(cpuset), cpuset),
+       SCX_BUG_ON(sched_setaffinity(0, sizeof(*cpuset), cpuset),
                   "Failed to affinitize to central CPU %d (max %d)",
                   skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1);
        CPU_FREE(cpuset);
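The hunk above fixes the classic sizeof-on-a-pointer bug: sizeof(cpuset) is the size of the pointer itself (typically 8 bytes on 64-bit), not the size of the CPU mask, so the kernel was being told the affinity mask was only 8 bytes long. A small standalone sketch of the difference (assuming a plain cpu_set_t allocation, which is what the corrected size expression implies):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    cpu_set_t *cpuset = malloc(sizeof(*cpuset));

    if (!cpuset)
        return 1;

    CPU_ZERO(cpuset);
    CPU_SET(0, cpuset);

    printf("sizeof(cpuset)  = %zu bytes (just the pointer)\n", sizeof(cpuset));
    printf("sizeof(*cpuset) = %zu bytes (the whole mask)\n", sizeof(*cpuset));

    /* Passing sizeof(cpuset) here would truncate the mask; the pointee size
     * is what sched_setaffinity() needs. */
    if (sched_setaffinity(0, sizeof(*cpuset), cpuset))
        perror("sched_setaffinity");

    free(cpuset);
    return 0;
}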
@@ -20,7 +20,7 @@ s32 BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_select_cpu, struct task_struct *p,
                 * If we dispatch to a bogus DSQ that will fall back to the
                 * builtin global DSQ, we fail gracefully.
                 */
-               scx_bpf_dispatch_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
+               scx_bpf_dsq_insert_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
                                       p->scx.dsq_vtime, 0);
                return cpu;
        }
@@ -17,8 +17,8 @@ s32 BPF_STRUCT_OPS(ddsp_vtimelocal_fail_select_cpu, struct task_struct *p,

        if (cpu >= 0) {
                /* Shouldn't be allowed to vtime dispatch to a builtin DSQ. */
-               scx_bpf_dispatch_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
-                                      p->scx.dsq_vtime, 0);
+               scx_bpf_dsq_insert_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
+                                        p->scx.dsq_vtime, 0);
                return cpu;
        }

@@ -43,9 +43,12 @@ void BPF_STRUCT_OPS(dsp_local_on_dispatch, s32 cpu, struct task_struct *prev)
        if (!p)
                return;

-       target = bpf_get_prandom_u32() % nr_cpus;
+       if (p->nr_cpus_allowed == nr_cpus)
+               target = bpf_get_prandom_u32() % nr_cpus;
+       else
+               target = scx_bpf_task_cpu(p);

-       scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
+       scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
        bpf_task_release(p);
 }

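The selftest change above only picks a random dispatch target when the task may run on every CPU; a task with a restricted affinity is sent back to its current CPU, since dispatching it to a CPU it cannot run on would be an error. A tiny user-space sketch of that selection rule (stand-in function and values, not the BPF code):

#include <stdio.h>
#include <stdlib.h>

/* Pick a dispatch target: any CPU for a fully unconstrained task, otherwise
 * the CPU the task is already on (which its affinity mask must allow). */
static int pick_target(int nr_cpus_allowed, int nr_cpus, int task_cpu)
{
    if (nr_cpus_allowed == nr_cpus)
        return rand() % nr_cpus;
    return task_cpu;
}

int main(void)
{
    printf("unconstrained task -> CPU %d\n", pick_target(8, 8, 3));
    printf("pinned task        -> CPU %d\n", pick_target(1, 8, 3));
    return 0;
}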
@@ -34,9 +34,10 @@ static enum scx_test_status run(void *ctx)
        /* Just sleeping is fine, plenty of scheduling events happening */
        sleep(1);

-       SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_ERROR));
        bpf_link__destroy(link);

+       SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_UNREG));
+
        return SCX_TEST_PASS;
 }

@@ -50,7 +51,7 @@ static void cleanup(void *ctx)
 struct scx_test dsp_local_on = {
        .name = "dsp_local_on",
        .description = "Verify we can directly dispatch tasks to a local DSQs "
-                      "from osp.dispatch()",
+                      "from ops.dispatch()",
        .setup = setup,
        .run = run,
        .cleanup = cleanup,
@@ -31,7 +31,7 @@ void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
        /* Can only call from ops.select_cpu() */
        scx_bpf_select_cpu_dfl(p, 0, 0, &found);

-       scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+       scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 }

 SEC(".struct_ops.link")
@@ -33,7 +33,7 @@ void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags)
        if (exit_point == EXIT_ENQUEUE)
                EXIT_CLEANLY();

-       scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
+       scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
 }

 void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
@@ -41,7 +41,7 @@ void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
        if (exit_point == EXIT_DISPATCH)
                EXIT_CLEANLY();

-       scx_bpf_consume(DSQ_ID);
+       scx_bpf_dsq_move_to_local(DSQ_ID);
 }

 void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)
@@ -12,6 +12,8 @@

 char _license[] SEC("license") = "GPL";

+#define DSQ_ID 0
+
 s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu,
                   u64 wake_flags)
 {
@@ -20,7 +22,7 @@ s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu,

 void BPF_STRUCT_OPS(maximal_enqueue, struct task_struct *p, u64 enq_flags)
 {
-       scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+       scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
 }

 void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
@@ -28,7 +30,7 @@ void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)

 void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev)
 {
-       scx_bpf_consume(SCX_DSQ_GLOBAL);
+       scx_bpf_dsq_move_to_local(DSQ_ID);
 }

 void BPF_STRUCT_OPS(maximal_runnable, struct task_struct *p, u64 enq_flags)
@@ -123,7 +125,7 @@ void BPF_STRUCT_OPS(maximal_cgroup_set_weight, struct cgroup *cgrp, u32 weight)

 s32 BPF_STRUCT_OPS_SLEEPABLE(maximal_init)
 {
-       return 0;
+       return scx_bpf_create_dsq(DSQ_ID, -1);
 }

 void BPF_STRUCT_OPS(maximal_exit, struct scx_exit_info *info)
@@ -30,7 +30,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_enqueue, struct task_struct *p,
        }
        scx_bpf_put_idle_cpumask(idle_mask);

-       scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+       scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 }

 SEC(".struct_ops.link")
@@ -67,7 +67,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_enqueue, struct task_struct *p,
                saw_local = true;
        }

-       scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, enq_flags);
+       scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, enq_flags);
 }

 s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_init_task,
@@ -29,7 +29,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_select_cpu, struct task_struct *p,
        cpu = prev_cpu;

 dispatch:
-       scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, 0);
+       scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, 0);
        return cpu;
 }

@@ -18,7 +18,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_bad_dsq_select_cpu, struct task_struct *p
                   s32 prev_cpu, u64 wake_flags)
 {
        /* Dispatching to a random DSQ should fail. */
-       scx_bpf_dispatch(p, 0xcafef00d, SCX_SLICE_DFL, 0);
+       scx_bpf_dsq_insert(p, 0xcafef00d, SCX_SLICE_DFL, 0);

        return prev_cpu;
 }
@@ -18,8 +18,8 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_dbl_dsp_select_cpu, struct task_struct *p
                   s32 prev_cpu, u64 wake_flags)
 {
        /* Dispatching twice in a row is disallowed. */
-       scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
-       scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+       scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+       scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);

        return prev_cpu;
 }
@@ -2,8 +2,8 @@
 /*
  * A scheduler that validates that enqueue flags are properly stored and
  * applied at dispatch time when a task is directly dispatched from
- * ops.select_cpu(). We validate this by using scx_bpf_dispatch_vtime(), and
- * making the test a very basic vtime scheduler.
+ * ops.select_cpu(). We validate this by using scx_bpf_dsq_insert_vtime(),
+ * and making the test a very basic vtime scheduler.
  *
  * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
  * Copyright (c) 2024 David Vernet <dvernet@meta.com>
@@ -47,13 +47,13 @@ s32 BPF_STRUCT_OPS(select_cpu_vtime_select_cpu, struct task_struct *p,
        cpu = prev_cpu;
        scx_bpf_test_and_clear_cpu_idle(cpu);
 ddsp:
-       scx_bpf_dispatch_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
+       scx_bpf_dsq_insert_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
        return cpu;
 }

 void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p)
 {
-       if (scx_bpf_consume(VTIME_DSQ))
+       if (scx_bpf_dsq_move_to_local(VTIME_DSQ))
                consumed = true;
 }
