/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2022 Meta Platforms, Inc. and affiliates.
 * Copyright (c) 2022 Tejun Heo
 * Copyright (c) 2022 David Vernet
 */
#ifdef CONFIG_SCHED_CLASS_EXT

struct sched_enq_and_set_ctx {
	struct task_struct	*p;
	int			queue_flags;
	bool			queued;
	bool			running;
};

void sched_deq_and_put_task(struct task_struct *p, int queue_flags,
			    struct sched_enq_and_set_ctx *ctx);
void sched_enq_and_set_task(struct sched_enq_and_set_ctx *ctx);

extern const struct sched_class ext_sched_class;

DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled);
DECLARE_STATIC_KEY_FALSE(__scx_switched_all);
#define scx_enabled()		static_branch_unlikely(&__scx_ops_enabled)
#define scx_switched_all()	static_branch_unlikely(&__scx_switched_all)

DECLARE_STATIC_KEY_FALSE(scx_ops_cpu_preempt);

static inline bool task_on_scx(const struct task_struct *p)
{
	return scx_enabled() && p->sched_class == &ext_sched_class;
}

void scx_next_task_picked(struct rq *rq, struct task_struct *p,
			  const struct sched_class *active);
void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
int scx_fork(struct task_struct *p);
void scx_post_fork(struct task_struct *p);
void scx_cancel_fork(struct task_struct *p);
bool scx_can_stop_tick(struct rq *rq);
int scx_check_setscheduler(struct task_struct *p, int policy);
bool task_should_scx(struct task_struct *p);
void init_sched_ext_class(void);

/*
 * Skip sched classes which are inactive in the current configuration: fair
 * is skipped when all tasks have been switched to SCX, and ext is skipped
 * when SCX is not enabled.
 */
static inline const struct sched_class *next_active_class(const struct sched_class *class)
{
	class++;
	if (scx_switched_all() && class == &fair_sched_class)
		class++;
	if (!scx_enabled() && class == &ext_sched_class)
		class++;
	return class;
}

#define for_active_class_range(class, _from, _to)				\
	for (class = (_from); class != (_to); class = next_active_class(class))

#define for_each_active_class(class)						\
	for_active_class_range(class, __sched_class_highest, __sched_class_lowest)

/*
 * SCX requires a balance() call before every pick_next_task() call including
 * when waking up from idle.
 */
#define for_balance_class_range(class, prev_class, end_class)			\
	for_active_class_range(class, (prev_class) > &ext_sched_class ?	\
			       &ext_sched_class : (prev_class), (end_class))

#else	/* CONFIG_SCHED_CLASS_EXT */

#define scx_enabled()		false
#define scx_switched_all()	false

static inline void scx_next_task_picked(struct rq *rq, struct task_struct *p,
					const struct sched_class *active) {}
static inline void scx_tick(struct rq *rq) {}
static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
static inline void scx_post_fork(struct task_struct *p) {}
static inline void scx_cancel_fork(struct task_struct *p) {}
static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline void init_sched_ext_class(void) {}

#define for_each_active_class		for_each_class
#define for_balance_class_range		for_class_range

#endif	/* CONFIG_SCHED_CLASS_EXT */

#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
void __scx_update_idle(struct rq *rq, bool idle);

static inline void scx_update_idle(struct rq *rq, bool idle)
{
	if (scx_enabled())
		__scx_update_idle(rq, idle);
}
#else
static inline void scx_update_idle(struct rq *rq, bool idle) {}
#endif
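
/*
 * Usage sketch (illustrative only, kept as a comment so this header stays
 * unchanged for the compiler): core scheduler paths are expected to use the
 * iteration wrappers above in place of for_each_class()/for_class_range() so
 * that inactive classes are skipped.  Assuming the existing sched_class
 * callback signatures (see kernel/sched/sched.h), a balance-then-pick loop
 * could look roughly like:
 *
 *	const struct sched_class *class;
 *	struct task_struct *p;
 *
 *	for_balance_class_range(class, prev->sched_class, &idle_sched_class) {
 *		if (class->balance && class->balance(rq, prev, rf))
 *			break;
 *	}
 *
 *	for_each_active_class(class) {
 *		p = class->pick_next_task(rq);
 *		if (p)
 *			return p;
 *	}
 *
 * for_balance_class_range() clamps the starting class to ext_sched_class when
 * prev ran in a lower-priority class, which is what guarantees the balance()
 * call before every pick_next_task(), including wakeups from idle.  See
 * kernel/sched/core.c for the authoritative call sites.
 */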