mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-15 11:37:47 +00:00
245254f708
Scheduler classes are strictly ordered and when a higher priority class has tasks to run, the lower priority ones lose access to the CPU. Being able to monitor and act on these events is necessary for use cases including strict core-scheduling and latency management.

This patch adds two operations, ops.cpu_acquire() and .cpu_release(). The former is invoked when a CPU becomes available to the BPF scheduler and the latter when the CPU is taken away from it. This patch also implements scx_bpf_reenqueue_local() which can be called from .cpu_release() to trigger requeueing of all tasks in the local dsq of the CPU so that the tasks can be reassigned to other available CPUs.

scx_pair is updated to use .cpu_acquire/release() along with %SCX_KICK_WAIT to make the pair scheduling guarantee strict even when a CPU is preempted by a higher priority scheduler class.

scx_qmap is updated to use .cpu_acquire/release() to empty the local dsq of a preempted CPU. A similar approach can be adopted by BPF schedulers that want to have tight control over latency.

v4: Use the new SCX_KICK_IDLE to wake up a CPU after re-enqueueing.

v3: Drop the const qualifier from scx_cpu_release_args.task. BPF enforces access control through the verifier, so the qualifier isn't actually operative and only gets in the way when interacting with various helpers.

v2: Add p->scx.kf_mask annotation to allow calling scx_bpf_reenqueue_local() from ops.cpu_release() nested inside ops.init() and other sleepable operations.

Signed-off-by: David Vernet <dvernet@meta.com>
Reviewed-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Josh Don <joshdon@google.com>
Acked-by: Hao Luo <haoluo@google.com>
Acked-by: Barret Rhoden <brho@google.com>
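As a rough illustration of the scx_qmap change described above, an ops.cpu_release() callback that drains the preempted CPU's local dsq could look like the sketch below. The callback name qmap_cpu_release, the nr_reenqueued counter and the BPF_STRUCT_OPS wrapper are illustrative assumptions following the sched_ext example-scheduler conventions, not a verbatim excerpt of the patch; only scx_bpf_reenqueue_local() and struct scx_cpu_release_args come from the commit message itself.

/* Illustrative sketch, not verbatim from the patch. */
static u64 nr_reenqueued;	/* illustrative stat counter */

void BPF_STRUCT_OPS(qmap_cpu_release, s32 cpu, struct scx_cpu_release_args *args)
{
	u32 cnt;

	/*
	 * A higher priority sched class has taken this CPU. Punt everything
	 * still sitting in the local dsq back to the BPF scheduler so the
	 * tasks can be dispatched to CPUs that are still available to it.
	 */
	cnt = scx_bpf_reenqueue_local();
	__sync_fetch_and_add(&nr_reenqueued, cnt);
}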
104 lines
3.5 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo <tj@kernel.org>
 * Copyright (c) 2022 David Vernet <dvernet@meta.com>
 */
#ifdef CONFIG_SCHED_CLASS_EXT
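/*
 * Usage sketch (an added annotation, not part of the original header): the
 * pair below lets a caller dequeue @p while saving its queued/running state,
 * change its scheduling properties, and then restore it. DEQUEUE_SAVE |
 * DEQUEUE_MOVE are the flags conventionally used for this save/restore
 * pattern elsewhere in the scheduler:
 *
 *	struct sched_enq_and_set_ctx ctx;
 *
 *	sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
 *	... modify p's scheduling properties, e.g. its sched_class ...
 *	sched_enq_and_set_task(&ctx);
 */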
struct sched_enq_and_set_ctx {
	struct task_struct	*p;
	int			queue_flags;
	bool			queued;
	bool			running;
};

void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
			    struct sched_enq_and_set_ctx *ctx);
void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx);

extern const struct sched_class ext_sched_class;

DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled);
DECLARE_STATIC_KEY_FALSE(__scx_switched_all);
#define scx_enabled()		static_branch_unlikely(&__scx_ops_enabled)
#define scx_switched_all()	static_branch_unlikely(&__scx_switched_all)

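/*
 * Added annotation (an inference, not from the original header): this key is
 * expected to be enabled only when the loaded BPF scheduler supplies
 * ops.cpu_acquire()/ops.cpu_release(), so the CPU preemption notifications
 * described in the commit above cost nothing for schedulers that don't use
 * them.
 */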
DECLARE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);

static inline bool task_on_scx(const struct task_struct *p)
{
	return scx_enabled() && p->sched_class == &ext_sched_class;
}

void scx_next_task_picked(struct rq *rq, struct task_struct *p,
			  const struct sched_class *active);
void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
int scx_fork(struct task_struct *p);
void scx_post_fork(struct task_struct *p);
void scx_cancel_fork(struct task_struct *p);
bool scx_can_stop_tick(struct rq *rq);
int scx_check_setscheduler(struct task_struct *p, int policy);
bool task_should_scx(struct task_struct *p);
void init_sched_ext_class(void);

static inline const struct sched_class *next_active_class(const struct sched_class *class)
{
	class++;
	if (scx_switched_all() && class == &fair_sched_class)
		class++;
	if (!scx_enabled() && class == &ext_sched_class)
		class++;
	return class;
}

#define for_active_class_range(class, _from, _to)				\
	for (class = (_from); class != (_to); class = next_active_class(class))

#define for_each_active_class(class)						\
	for_active_class_range(class, __sched_class_highest, __sched_class_lowest)

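/*
 * Illustrative usage (an assumption about the caller, not part of this
 * header): a pick loop that should only consider classes which can currently
 * own the CPU might look like:
 *
 *	for_each_active_class(class) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 */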
/*
 * SCX requires a balance() call before every pick_next_task() call including
 * when waking up from idle.
 */
#define for_balance_class_range(class, prev_class, end_class)			\
	for_active_class_range(class, (prev_class) > &ext_sched_class ?	\
			       &ext_sched_class : (prev_class), (end_class))

#else	/* CONFIG_SCHED_CLASS_EXT */

#define scx_enabled()		false
#define scx_switched_all()	false

static inline void scx_next_task_picked(struct rq *rq, struct task_struct *p,
					const struct sched_class *active) {}
static inline void scx_tick(struct rq *rq) {}
static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
static inline void scx_post_fork(struct task_struct *p) {}
static inline void scx_cancel_fork(struct task_struct *p) {}
static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline void init_sched_ext_class(void) {}

#define for_each_active_class		for_each_class
#define for_balance_class_range		for_class_range

#endif	/* CONFIG_SCHED_CLASS_EXT */

#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
void __scx_update_idle(struct rq *rq, bool idle);

static inline void scx_update_idle(struct rq *rq, bool idle)
{
	if (scx_enabled())
		__scx_update_idle(rq, idle);
}
#else
static inline void scx_update_idle(struct rq *rq, bool idle) {}
#endif