sched_ext: Fixes for v6.12-rc5

- Instances of scx_ops_bypass() could race each other leading to
  misbehavior. Fix by protecting the operation with a spinlock.

- selftest and userspace header fixes.

-----BEGIN PGP SIGNATURE-----

iIQEABYKACwWIQTfIjM1kS57o3GsC/uxYfJx3gVYGQUCZyF/5Q4cdGpAa2VybmVs
Lm9yZwAKCRCxYfJx3gVYGRi+AP4+jGUz+O1LS0bCNj44Xlr0v6kci5dfJR7TlBv5
hwROcgEA84i7nRq6oJ1IkK7ItLbZYwgZyxqdn0Pgsq+oMWhgAwE=
=R766
-----END PGP SIGNATURE-----

Merge tag 'sched_ext-for-6.12-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext

Pull sched_ext fixes from Tejun Heo:

 - Instances of scx_ops_bypass() could race each other leading to
   misbehavior. Fix by protecting the operation with a spinlock.

 - selftest and userspace header fixes

* tag 'sched_ext-for-6.12-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext:
  sched_ext: Fix enq_last_no_enq_fails selftest
  sched_ext: Make cast_mask() inline
  scx: Fix raciness in scx_ops_bypass()
  scx: Fix exit selftest to use custom DSQ
  sched_ext: Fix function pointer type mismatches in BPF selftests
  selftests/sched_ext: add order-only dependency of runner.o on BPFOBJ
commit daa9f66fe1
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -862,7 +862,8 @@ static DEFINE_MUTEX(scx_ops_enable_mutex);
 DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
 static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
-static atomic_t scx_ops_bypass_depth = ATOMIC_INIT(0);
+static int scx_ops_bypass_depth;
+static DEFINE_RAW_SPINLOCK(__scx_ops_bypass_lock);
 static bool scx_ops_init_task_enabled;
 static bool scx_switching_all;
 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
@@ -4298,18 +4299,20 @@ bool task_should_scx(struct task_struct *p)
  */
 static void scx_ops_bypass(bool bypass)
 {
-        int depth, cpu;
+        int cpu;
+        unsigned long flags;
 
+        raw_spin_lock_irqsave(&__scx_ops_bypass_lock, flags);
         if (bypass) {
-                depth = atomic_inc_return(&scx_ops_bypass_depth);
-                WARN_ON_ONCE(depth <= 0);
-                if (depth != 1)
-                        return;
+                scx_ops_bypass_depth++;
+                WARN_ON_ONCE(scx_ops_bypass_depth <= 0);
+                if (scx_ops_bypass_depth != 1)
+                        goto unlock;
         } else {
-                depth = atomic_dec_return(&scx_ops_bypass_depth);
-                WARN_ON_ONCE(depth < 0);
-                if (depth != 0)
-                        return;
+                scx_ops_bypass_depth--;
+                WARN_ON_ONCE(scx_ops_bypass_depth < 0);
+                if (scx_ops_bypass_depth != 0)
+                        goto unlock;
         }
 
         /*
@@ -4326,7 +4329,7 @@ static void scx_ops_bypass(bool bypass)
                 struct rq_flags rf;
                 struct task_struct *p, *n;
 
-                rq_lock_irqsave(rq, &rf);
+                rq_lock(rq, &rf);
 
                 if (bypass) {
                         WARN_ON_ONCE(rq->scx.flags & SCX_RQ_BYPASSING);
@@ -4362,11 +4365,13 @@ static void scx_ops_bypass(bool bypass)
                         sched_enq_and_set_task(&ctx);
                 }
 
-                rq_unlock_irqrestore(rq, &rf);
+                rq_unlock(rq, &rf);
 
                 /* resched to restore ticks and idle state */
                 resched_cpu(cpu);
         }
+unlock:
+        raw_spin_unlock_irqrestore(&__scx_ops_bypass_lock, flags);
 }
 
 static void free_exit_info(struct scx_exit_info *ei)
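The fix above hinges on making the depth transition and the per-runqueue
work it gates a single critical section. With the old atomic counter, a
concurrent bypass and unbypass could interleave between the counter update
and the runqueue walk, leaving some runqueues bypassed and others not. With
the depth now only touched under __scx_ops_bypass_lock, a plain int
suffices, and because the outer irqsave lock already disables interrupts,
the inner loop can drop rq_lock_irqsave() for plain rq_lock(). A minimal
userspace sketch of the same pattern (illustrative only; pthread-based,
with made-up names -- not the kernel code):

  #include <pthread.h>
  #include <stdbool.h>

  static int bypass_depth;                /* guarded by bypass_lock */
  static pthread_spinlock_t bypass_lock;

  __attribute__((constructor))
  static void bypass_lock_init(void)
  {
          pthread_spin_init(&bypass_lock, PTHREAD_PROCESS_PRIVATE);
  }

  static void ops_bypass(bool bypass)
  {
          pthread_spin_lock(&bypass_lock);
          if (bypass) {
                  /* only the 0 -> 1 transition does the real work */
                  if (++bypass_depth != 1)
                          goto unlock;
          } else {
                  /* only the 1 -> 0 transition undoes it */
                  if (--bypass_depth != 0)
                          goto unlock;
          }
          /* ... flip bypass state everywhere while still locked ... */
  unlock:
          pthread_spin_unlock(&bypass_lock);
  }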
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -320,7 +320,7 @@ u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
 /*
  * Access a cpumask in read-only mode (typically to check bits).
  */
-const struct cpumask *cast_mask(struct bpf_cpumask *mask)
+static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
 {
         return (const struct cpumask *)mask;
 }
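cast_mask() is a pure pointer cast, so there is no reason for it to exist
as a standalone function; defined without static/inline in a header that
many BPF programs include, it can end up emitted as a separate (and
duplicated) BPF subprogram. Marking it static __always_inline keeps it
header-safe and makes the cast disappear at compile time. A hedged usage
sketch (cpu_in_mask is a hypothetical helper; assumes the usual scx BPF
setup where bpf_cpumask_test_cpu() is declared as a kfunc):

  static __always_inline bool cpu_in_mask(s32 cpu, struct bpf_cpumask *mask)
  {
          /* NULL check: a cpumask kptr may not be populated yet */
          if (!mask)
                  return false;
          return bpf_cpumask_test_cpu(cpu, cast_mask(mask));
  }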
--- a/tools/testing/selftests/sched_ext/Makefile
+++ b/tools/testing/selftests/sched_ext/Makefile
@@ -184,7 +184,7 @@ auto-test-targets := \
 
 testcase-targets := $(addsuffix .o,$(addprefix $(SCXOBJ_DIR)/,$(auto-test-targets)))
 
-$(SCXOBJ_DIR)/runner.o: runner.c | $(SCXOBJ_DIR)
+$(SCXOBJ_DIR)/runner.o: runner.c | $(SCXOBJ_DIR) $(BPFOBJ)
         $(CC) $(CFLAGS) -c $< -o $@
 
 # Create all of the test targets object files, whose testcase objects will be
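In GNU make, prerequisites listed after the `|` are order-only: they are
built before the recipe runs, but being newer than the target does not by
itself trigger a rebuild. Adding $(BPFOBJ) there guarantees libbpf is built
before runner.c is compiled, without forcing runner.o to be recompiled
every time the libbpf object's timestamp changes.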
--- a/tools/testing/selftests/sched_ext/create_dsq.bpf.c
+++ b/tools/testing/selftests/sched_ext/create_dsq.bpf.c
@@ -51,8 +51,8 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(create_dsq_init)
 
 SEC(".struct_ops.link")
 struct sched_ext_ops create_dsq_ops = {
-        .init_task = create_dsq_init_task,
-        .exit_task = create_dsq_exit_task,
-        .init = create_dsq_init,
+        .init_task = (void *) create_dsq_init_task,
+        .exit_task = (void *) create_dsq_exit_task,
+        .init = (void *) create_dsq_init,
         .name = "create_dsq",
 };
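This hunk and every remaining selftest hunk apply the same mechanical fix:
the BPF_STRUCT_OPS() macros emit handlers whose C prototypes do not exactly
match the function pointer types declared in struct sched_ext_ops, and
newer compilers reject such initializations as incompatible function
pointer types. Casting through (void *) keeps the address while sidestepping
the type check. A plain-C analogue with hypothetical types (it relies on
the common compiler extension permitting object/function pointer
conversion, just as the BPF code does):

  struct ops {
          /* the declared member type */
          long (*enqueue)(void *task, unsigned long long flags);
  };

  /* the handler is generated with a different, ABI-compatible prototype */
  static long my_enqueue(unsigned long long *ctx)
  {
          return 0;
  }

  struct ops my_ops = {
          /* direct assignment would now warn or error */
          .enqueue = (void *)my_enqueue,
  };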
--- a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
+++ b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
@@ -35,8 +35,8 @@ void BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_exit, struct scx_exit_info *ei)
 
 SEC(".struct_ops.link")
 struct sched_ext_ops ddsp_bogus_dsq_fail_ops = {
-        .select_cpu = ddsp_bogus_dsq_fail_select_cpu,
-        .exit = ddsp_bogus_dsq_fail_exit,
+        .select_cpu = (void *) ddsp_bogus_dsq_fail_select_cpu,
+        .exit = (void *) ddsp_bogus_dsq_fail_exit,
         .name = "ddsp_bogus_dsq_fail",
         .timeout_ms = 1000U,
 };

--- a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
+++ b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
@@ -32,8 +32,8 @@ void BPF_STRUCT_OPS(ddsp_vtimelocal_fail_exit, struct scx_exit_info *ei)
 
 SEC(".struct_ops.link")
 struct sched_ext_ops ddsp_vtimelocal_fail_ops = {
-        .select_cpu = ddsp_vtimelocal_fail_select_cpu,
-        .exit = ddsp_vtimelocal_fail_exit,
+        .select_cpu = (void *) ddsp_vtimelocal_fail_select_cpu,
+        .exit = (void *) ddsp_vtimelocal_fail_exit,
         .name = "ddsp_vtimelocal_fail",
         .timeout_ms = 1000U,
 };

--- a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
+++ b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
@@ -56,10 +56,10 @@ void BPF_STRUCT_OPS(dsp_local_on_exit, struct scx_exit_info *ei)
 
 SEC(".struct_ops.link")
 struct sched_ext_ops dsp_local_on_ops = {
-        .select_cpu = dsp_local_on_select_cpu,
-        .enqueue = dsp_local_on_enqueue,
-        .dispatch = dsp_local_on_dispatch,
-        .exit = dsp_local_on_exit,
+        .select_cpu = (void *) dsp_local_on_select_cpu,
+        .enqueue = (void *) dsp_local_on_enqueue,
+        .dispatch = (void *) dsp_local_on_dispatch,
+        .exit = (void *) dsp_local_on_exit,
         .name = "dsp_local_on",
         .timeout_ms = 1000U,
 };
--- a/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c
+++ b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c
@@ -12,10 +12,18 @@
 
 char _license[] SEC("license") = "GPL";
 
+u32 exit_kind;
+
+void BPF_STRUCT_OPS_SLEEPABLE(enq_last_no_enq_fails_exit, struct scx_exit_info *info)
+{
+        exit_kind = info->kind;
+}
+
 SEC(".struct_ops.link")
 struct sched_ext_ops enq_last_no_enq_fails_ops = {
         .name = "enq_last_no_enq_fails",
         /* Need to define ops.enqueue() with SCX_OPS_ENQ_LAST */
         .flags = SCX_OPS_ENQ_LAST,
+        .exit = (void *) enq_last_no_enq_fails_exit,
         .timeout_ms = 1000U,
 };
--- a/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c
+++ b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c
@@ -31,8 +31,12 @@ static enum scx_test_status run(void *ctx)
         struct bpf_link *link;
 
         link = bpf_map__attach_struct_ops(skel->maps.enq_last_no_enq_fails_ops);
-        if (link) {
-                SCX_ERR("Incorrectly succeeded in to attaching scheduler");
+        if (!link) {
+                SCX_ERR("Incorrectly failed at attaching scheduler");
+                return SCX_TEST_FAIL;
+        }
+        if (!skel->bss->exit_kind) {
+                SCX_ERR("Incorrectly stayed loaded");
                 return SCX_TEST_FAIL;
         }
 
@@ -50,7 +54,7 @@ static void cleanup(void *ctx)
 
 struct scx_test enq_last_no_enq_fails = {
         .name = "enq_last_no_enq_fails",
-        .description = "Verify we fail to load a scheduler if we specify "
+        .description = "Verify we eject a scheduler if we specify "
                        "the SCX_OPS_ENQ_LAST flag without defining "
                        "ops.enqueue()",
         .setup = setup,
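The test's expectation flips accordingly: specifying SCX_OPS_ENQ_LAST
without ops.enqueue() is no longer a load-time failure. The scheduler is
expected to attach successfully and then be ejected by the kernel at
runtime, which the test observes through the exit_kind value recorded by
the new ops.exit() handler above.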
--- a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
+++ b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
@@ -36,8 +36,8 @@ void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
 
 SEC(".struct_ops.link")
 struct sched_ext_ops enq_select_cpu_fails_ops = {
-        .select_cpu = enq_select_cpu_fails_select_cpu,
-        .enqueue = enq_select_cpu_fails_enqueue,
+        .select_cpu = (void *) enq_select_cpu_fails_select_cpu,
+        .enqueue = (void *) enq_select_cpu_fails_enqueue,
         .name = "enq_select_cpu_fails",
         .timeout_ms = 1000U,
 };
--- a/tools/testing/selftests/sched_ext/exit.bpf.c
+++ b/tools/testing/selftests/sched_ext/exit.bpf.c
@@ -15,6 +15,8 @@ UEI_DEFINE(uei);
 
 #define EXIT_CLEANLY() scx_bpf_exit(exit_point, "%d", exit_point)
 
+#define DSQ_ID 0
+
 s32 BPF_STRUCT_OPS(exit_select_cpu, struct task_struct *p,
                    s32 prev_cpu, u64 wake_flags)
 {
@@ -31,7 +33,7 @@ void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags)
         if (exit_point == EXIT_ENQUEUE)
                 EXIT_CLEANLY();
 
-        scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+        scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
 }
 
 void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
@@ -39,7 +41,7 @@ void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
         if (exit_point == EXIT_DISPATCH)
                 EXIT_CLEANLY();
 
-        scx_bpf_consume(SCX_DSQ_GLOBAL);
+        scx_bpf_consume(DSQ_ID);
 }
 
 void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)
@@ -67,18 +69,18 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(exit_init)
         if (exit_point == EXIT_INIT)
                 EXIT_CLEANLY();
 
-        return 0;
+        return scx_bpf_create_dsq(DSQ_ID, -1);
 }
 
 SEC(".struct_ops.link")
 struct sched_ext_ops exit_ops = {
-        .select_cpu = exit_select_cpu,
-        .enqueue = exit_enqueue,
-        .dispatch = exit_dispatch,
-        .init_task = exit_init_task,
-        .enable = exit_enable,
-        .exit = exit_exit,
-        .init = exit_init,
+        .select_cpu = (void *) exit_select_cpu,
+        .enqueue = (void *) exit_enqueue,
+        .dispatch = (void *) exit_dispatch,
+        .init_task = (void *) exit_init_task,
+        .enable = (void *) exit_enable,
+        .exit = (void *) exit_exit,
+        .init = (void *) exit_init,
         .name = "exit",
         .timeout_ms = 1000U,
 };
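Taken together, these exit.bpf.c hunks move the test off the built-in
global DSQ and onto one the scheduler creates itself: ops.init() creates
DSQ_ID (propagating any failure), enqueue dispatches to it, and dispatch
consumes from it. Condensed sketch of the resulting flow (names as in the
test, exit_point checks omitted):

  #define DSQ_ID 0

  s32 BPF_STRUCT_OPS_SLEEPABLE(exit_init)
  {
          /* create the custom DSQ on any NUMA node; nonzero fails init */
          return scx_bpf_create_dsq(DSQ_ID, -1);
  }

  void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags)
  {
          scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
  }

  void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
  {
          scx_bpf_consume(DSQ_ID);
  }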
--- a/tools/testing/selftests/sched_ext/hotplug.bpf.c
+++ b/tools/testing/selftests/sched_ext/hotplug.bpf.c
@@ -46,16 +46,16 @@ void BPF_STRUCT_OPS_SLEEPABLE(hotplug_cpu_offline, s32 cpu)
 
 SEC(".struct_ops.link")
 struct sched_ext_ops hotplug_cb_ops = {
-        .cpu_online = hotplug_cpu_online,
-        .cpu_offline = hotplug_cpu_offline,
-        .exit = hotplug_exit,
+        .cpu_online = (void *) hotplug_cpu_online,
+        .cpu_offline = (void *) hotplug_cpu_offline,
+        .exit = (void *) hotplug_exit,
         .name = "hotplug_cbs",
         .timeout_ms = 1000U,
 };
 
 SEC(".struct_ops.link")
 struct sched_ext_ops hotplug_nocb_ops = {
-        .exit = hotplug_exit,
+        .exit = (void *) hotplug_exit,
         .name = "hotplug_nocbs",
         .timeout_ms = 1000U,
 };
--- a/tools/testing/selftests/sched_ext/init_enable_count.bpf.c
+++ b/tools/testing/selftests/sched_ext/init_enable_count.bpf.c
@@ -45,9 +45,9 @@ void BPF_STRUCT_OPS(cnt_disable, struct task_struct *p)
 
 SEC(".struct_ops.link")
 struct sched_ext_ops init_enable_count_ops = {
-        .init_task = cnt_init_task,
-        .exit_task = cnt_exit_task,
-        .enable = cnt_enable,
-        .disable = cnt_disable,
+        .init_task = (void *) cnt_init_task,
+        .exit_task = (void *) cnt_exit_task,
+        .enable = (void *) cnt_enable,
+        .disable = (void *) cnt_disable,
         .name = "init_enable_count",
 };
--- a/tools/testing/selftests/sched_ext/maximal.bpf.c
+++ b/tools/testing/selftests/sched_ext/maximal.bpf.c
@@ -131,34 +131,34 @@ void BPF_STRUCT_OPS(maximal_exit, struct scx_exit_info *info)
 
 SEC(".struct_ops.link")
 struct sched_ext_ops maximal_ops = {
-        .select_cpu = maximal_select_cpu,
-        .enqueue = maximal_enqueue,
-        .dequeue = maximal_dequeue,
-        .dispatch = maximal_dispatch,
-        .runnable = maximal_runnable,
-        .running = maximal_running,
-        .stopping = maximal_stopping,
-        .quiescent = maximal_quiescent,
-        .yield = maximal_yield,
-        .core_sched_before = maximal_core_sched_before,
-        .set_weight = maximal_set_weight,
-        .set_cpumask = maximal_set_cpumask,
-        .update_idle = maximal_update_idle,
-        .cpu_acquire = maximal_cpu_acquire,
-        .cpu_release = maximal_cpu_release,
-        .cpu_online = maximal_cpu_online,
-        .cpu_offline = maximal_cpu_offline,
-        .init_task = maximal_init_task,
-        .enable = maximal_enable,
-        .exit_task = maximal_exit_task,
-        .disable = maximal_disable,
-        .cgroup_init = maximal_cgroup_init,
-        .cgroup_exit = maximal_cgroup_exit,
-        .cgroup_prep_move = maximal_cgroup_prep_move,
-        .cgroup_move = maximal_cgroup_move,
-        .cgroup_cancel_move = maximal_cgroup_cancel_move,
-        .cgroup_set_weight = maximal_cgroup_set_weight,
-        .init = maximal_init,
-        .exit = maximal_exit,
+        .select_cpu = (void *) maximal_select_cpu,
+        .enqueue = (void *) maximal_enqueue,
+        .dequeue = (void *) maximal_dequeue,
+        .dispatch = (void *) maximal_dispatch,
+        .runnable = (void *) maximal_runnable,
+        .running = (void *) maximal_running,
+        .stopping = (void *) maximal_stopping,
+        .quiescent = (void *) maximal_quiescent,
+        .yield = (void *) maximal_yield,
+        .core_sched_before = (void *) maximal_core_sched_before,
+        .set_weight = (void *) maximal_set_weight,
+        .set_cpumask = (void *) maximal_set_cpumask,
+        .update_idle = (void *) maximal_update_idle,
+        .cpu_acquire = (void *) maximal_cpu_acquire,
+        .cpu_release = (void *) maximal_cpu_release,
+        .cpu_online = (void *) maximal_cpu_online,
+        .cpu_offline = (void *) maximal_cpu_offline,
+        .init_task = (void *) maximal_init_task,
+        .enable = (void *) maximal_enable,
+        .exit_task = (void *) maximal_exit_task,
+        .disable = (void *) maximal_disable,
+        .cgroup_init = (void *) maximal_cgroup_init,
+        .cgroup_exit = (void *) maximal_cgroup_exit,
+        .cgroup_prep_move = (void *) maximal_cgroup_prep_move,
+        .cgroup_move = (void *) maximal_cgroup_move,
+        .cgroup_cancel_move = (void *) maximal_cgroup_cancel_move,
+        .cgroup_set_weight = (void *) maximal_cgroup_set_weight,
+        .init = (void *) maximal_init,
+        .exit = (void *) maximal_exit,
         .name = "maximal",
 };
--- a/tools/testing/selftests/sched_ext/maybe_null.bpf.c
+++ b/tools/testing/selftests/sched_ext/maybe_null.bpf.c
@@ -29,8 +29,8 @@ bool BPF_STRUCT_OPS(maybe_null_success_yield, struct task_struct *from,
 
 SEC(".struct_ops.link")
 struct sched_ext_ops maybe_null_success = {
-        .dispatch = maybe_null_success_dispatch,
-        .yield = maybe_null_success_yield,
-        .enable = maybe_null_running,
+        .dispatch = (void *) maybe_null_success_dispatch,
+        .yield = (void *) maybe_null_success_yield,
+        .enable = (void *) maybe_null_running,
         .name = "minimal",
 };

--- a/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c
+++ b/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c
@@ -19,7 +19,7 @@ void BPF_STRUCT_OPS(maybe_null_fail_dispatch, s32 cpu, struct task_struct *p)
 
 SEC(".struct_ops.link")
 struct sched_ext_ops maybe_null_fail = {
-        .dispatch = maybe_null_fail_dispatch,
-        .enable = maybe_null_running,
+        .dispatch = (void *) maybe_null_fail_dispatch,
+        .enable = (void *) maybe_null_running,
         .name = "maybe_null_fail_dispatch",
 };

--- a/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c
+++ b/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c
@@ -22,7 +22,7 @@ bool BPF_STRUCT_OPS(maybe_null_fail_yield, struct task_struct *from,
 
 SEC(".struct_ops.link")
 struct sched_ext_ops maybe_null_fail = {
-        .yield = maybe_null_fail_yield,
-        .enable = maybe_null_running,
+        .yield = (void *) maybe_null_fail_yield,
+        .enable = (void *) maybe_null_running,
         .name = "maybe_null_fail_yield",
 };
--- a/tools/testing/selftests/sched_ext/prog_run.bpf.c
+++ b/tools/testing/selftests/sched_ext/prog_run.bpf.c
@@ -28,6 +28,6 @@ void BPF_STRUCT_OPS(prog_run_exit, struct scx_exit_info *ei)
 
 SEC(".struct_ops.link")
 struct sched_ext_ops prog_run_ops = {
-        .exit = prog_run_exit,
+        .exit = (void *) prog_run_exit,
         .name = "prog_run",
 };

--- a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
@@ -35,6 +35,6 @@ void BPF_STRUCT_OPS(select_cpu_dfl_enqueue, struct task_struct *p,
 
 SEC(".struct_ops.link")
 struct sched_ext_ops select_cpu_dfl_ops = {
-        .enqueue = select_cpu_dfl_enqueue,
+        .enqueue = (void *) select_cpu_dfl_enqueue,
         .name = "select_cpu_dfl",
 };
--- a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
@@ -82,8 +82,8 @@ s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_init_task,
 
 SEC(".struct_ops.link")
 struct sched_ext_ops select_cpu_dfl_nodispatch_ops = {
-        .select_cpu = select_cpu_dfl_nodispatch_select_cpu,
-        .enqueue = select_cpu_dfl_nodispatch_enqueue,
-        .init_task = select_cpu_dfl_nodispatch_init_task,
+        .select_cpu = (void *) select_cpu_dfl_nodispatch_select_cpu,
+        .enqueue = (void *) select_cpu_dfl_nodispatch_enqueue,
+        .init_task = (void *) select_cpu_dfl_nodispatch_init_task,
         .name = "select_cpu_dfl_nodispatch",
 };

--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
@@ -35,7 +35,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_select_cpu, struct task_struct *p,
 
 SEC(".struct_ops.link")
 struct sched_ext_ops select_cpu_dispatch_ops = {
-        .select_cpu = select_cpu_dispatch_select_cpu,
+        .select_cpu = (void *) select_cpu_dispatch_select_cpu,
         .name = "select_cpu_dispatch",
         .timeout_ms = 1000U,
 };

--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
@@ -30,8 +30,8 @@ void BPF_STRUCT_OPS(select_cpu_dispatch_bad_dsq_exit, struct scx_exit_info *ei)
 
 SEC(".struct_ops.link")
 struct sched_ext_ops select_cpu_dispatch_bad_dsq_ops = {
-        .select_cpu = select_cpu_dispatch_bad_dsq_select_cpu,
-        .exit = select_cpu_dispatch_bad_dsq_exit,
+        .select_cpu = (void *) select_cpu_dispatch_bad_dsq_select_cpu,
+        .exit = (void *) select_cpu_dispatch_bad_dsq_exit,
         .name = "select_cpu_dispatch_bad_dsq",
         .timeout_ms = 1000U,
 };

--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
@@ -31,8 +31,8 @@ void BPF_STRUCT_OPS(select_cpu_dispatch_dbl_dsp_exit, struct scx_exit_info *ei)
 
 SEC(".struct_ops.link")
 struct sched_ext_ops select_cpu_dispatch_dbl_dsp_ops = {
-        .select_cpu = select_cpu_dispatch_dbl_dsp_select_cpu,
-        .exit = select_cpu_dispatch_dbl_dsp_exit,
+        .select_cpu = (void *) select_cpu_dispatch_dbl_dsp_select_cpu,
+        .exit = (void *) select_cpu_dispatch_dbl_dsp_exit,
         .name = "select_cpu_dispatch_dbl_dsp",
         .timeout_ms = 1000U,
 };
--- a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
@@ -81,12 +81,12 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(select_cpu_vtime_init)
 
 SEC(".struct_ops.link")
 struct sched_ext_ops select_cpu_vtime_ops = {
-        .select_cpu = select_cpu_vtime_select_cpu,
-        .dispatch = select_cpu_vtime_dispatch,
-        .running = select_cpu_vtime_running,
-        .stopping = select_cpu_vtime_stopping,
-        .enable = select_cpu_vtime_enable,
-        .init = select_cpu_vtime_init,
+        .select_cpu = (void *) select_cpu_vtime_select_cpu,
+        .dispatch = (void *) select_cpu_vtime_dispatch,
+        .running = (void *) select_cpu_vtime_running,
+        .stopping = (void *) select_cpu_vtime_stopping,
+        .enable = (void *) select_cpu_vtime_enable,
+        .init = (void *) select_cpu_vtime_init,
         .name = "select_cpu_vtime",
         .timeout_ms = 1000U,
 };