From 4572541892ea4e1dade2e9c1313d3f8069d37f0a Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Wed, 20 Nov 2024 15:29:03 +0100 Subject: [PATCH 01/18] sched_ext: Use the NUMA scheduling domain for NUMA optimizations Rely on the NUMA scheduling domain topology, instead of accessing NUMA topology information directly. There is basically no functional change, but in this way we ensure consistent use of the same topology information determined by the scheduling subsystem. Fixes: f6ce6b949304 ("sched_ext: Do not enable LLC/NUMA optimizations when domains overlap") Signed-off-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 114 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 86 insertions(+), 28 deletions(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 7fff1d045477..71342f3719c1 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -3215,6 +3215,74 @@ found: goto retry; } +/* + * Return the amount of CPUs in the same LLC domain of @cpu (or zero if the LLC + * domain is not defined). + */ +static unsigned int llc_weight(s32 cpu) +{ + struct sched_domain *sd; + + sd = rcu_dereference(per_cpu(sd_llc, cpu)); + if (!sd) + return 0; + + return sd->span_weight; +} + +/* + * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC + * domain is not defined). + */ +static struct cpumask *llc_span(s32 cpu) +{ + struct sched_domain *sd; + + sd = rcu_dereference(per_cpu(sd_llc, cpu)); + if (!sd) + return 0; + + return sched_domain_span(sd); +} + +/* + * Return the amount of CPUs in the same NUMA domain of @cpu (or zero if the + * NUMA domain is not defined). + */ +static unsigned int numa_weight(s32 cpu) +{ + struct sched_domain *sd; + struct sched_group *sg; + + sd = rcu_dereference(per_cpu(sd_numa, cpu)); + if (!sd) + return 0; + sg = sd->groups; + if (!sg) + return 0; + + return sg->group_weight; +} + +/* + * Return the cpumask representing the NUMA domain of @cpu (or NULL if the NUMA + * domain is not defined). + */ +static struct cpumask *numa_span(s32 cpu) +{ + struct sched_domain *sd; + struct sched_group *sg; + + sd = rcu_dereference(per_cpu(sd_numa, cpu)); + if (!sd) + return NULL; + sg = sd->groups; + if (!sg) + return NULL; + + return sched_group_span(sg); +} + /* * Return true if the LLC domains do not perfectly overlap with the NUMA * domains, false otherwise. @@ -3246,19 +3314,10 @@ static bool llc_numa_mismatch(void) * overlapping, which is incorrect (as NUMA 1 has two distinct LLC * domains). */ - for_each_online_cpu(cpu) { - const struct cpumask *numa_cpus; - struct sched_domain *sd; - - sd = rcu_dereference(per_cpu(sd_llc, cpu)); - if (!sd) + for_each_online_cpu(cpu) + if (llc_weight(cpu) != numa_weight(cpu)) return true; - numa_cpus = cpumask_of_node(cpu_to_node(cpu)); - if (sd->span_weight != cpumask_weight(numa_cpus)) - return true; - } - return false; } @@ -3276,8 +3335,7 @@ static bool llc_numa_mismatch(void) static void update_selcpu_topology(void) { bool enable_llc = false, enable_numa = false; - struct sched_domain *sd; - const struct cpumask *cpus; + unsigned int nr_cpus; s32 cpu = cpumask_first(cpu_online_mask); /* @@ -3291,10 +3349,12 @@ static void update_selcpu_topology(void) * CPUs. 
*/ rcu_read_lock(); - sd = rcu_dereference(per_cpu(sd_llc, cpu)); - if (sd) { - if (sd->span_weight < num_online_cpus()) + nr_cpus = llc_weight(cpu); + if (nr_cpus > 0) { + if (nr_cpus < num_online_cpus()) enable_llc = true; + pr_debug("sched_ext: LLC=%*pb weight=%u\n", + cpumask_pr_args(llc_span(cpu)), llc_weight(cpu)); } /* @@ -3306,9 +3366,13 @@ static void update_selcpu_topology(void) * enabling both NUMA and LLC optimizations is unnecessary, as checking * for an idle CPU in the same domain twice is redundant. */ - cpus = cpumask_of_node(cpu_to_node(cpu)); - if ((cpumask_weight(cpus) < num_online_cpus()) && llc_numa_mismatch()) - enable_numa = true; + nr_cpus = numa_weight(cpu); + if (nr_cpus > 0) { + if (nr_cpus < num_online_cpus() && llc_numa_mismatch()) + enable_numa = true; + pr_debug("sched_ext: NUMA=%*pb weight=%u\n", + cpumask_pr_args(numa_span(cpu)), numa_weight(cpu)); + } rcu_read_unlock(); pr_debug("sched_ext: LLC idle selection %s\n", @@ -3360,7 +3424,6 @@ static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, *found = false; - /* * This is necessary to protect llc_cpus. */ @@ -3379,15 +3442,10 @@ static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, */ if (p->nr_cpus_allowed >= num_possible_cpus()) { if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa)) - numa_cpus = cpumask_of_node(cpu_to_node(prev_cpu)); + numa_cpus = numa_span(prev_cpu); - if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) { - struct sched_domain *sd; - - sd = rcu_dereference(per_cpu(sd_llc, prev_cpu)); - if (sd) - llc_cpus = sched_domain_span(sd); - } + if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) + llc_cpus = llc_span(prev_cpu); } /* From 8da7bf2cee2735dbd2478cf07672ff0d243ce6ed Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 12 Dec 2024 16:16:57 -1000 Subject: [PATCH 02/18] tools/sched_ext: Receive updates from SCX repo Receive tools/sched_ext updates form https://github.com/sched-ext/scx to sync userspace bits: - scx_bpf_dump_header() added which can be used to print out basic scheduler info on dump. - BPF possible/online CPU iterators added. - CO-RE enums added. The enums are autogenerated from vmlinux.h. Include the generated artifacts in tools/sched_ext to keep the Makefile simpler. - Other misc changes. 
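As an editorial illustration of how the new pieces fit together (a minimal sketch, not part of the synced changes; "example_dump", "example_ops" and the "example" ops name are made-up names), a scheduler's .bpf.c could print the standard header and walk CPUs with the new iterator from its ops.dump() callback:

  #include <scx/common.bpf.h>

  char _license[] SEC("license") = "GPL";

  void BPF_STRUCT_OPS(example_dump, struct scx_dump_ctx *dctx)
  {
          int *cpu;

          /* kernel version, CONFIG_LOCALVERSION and compiler string */
          scx_bpf_dump_header();

          /* new bit iterator over the online cpumask */
          for_each_online_cpu(cpu)
                  scx_bpf_dump("CPU %d is online\n", *cpu);
  }

  SCX_OPS_DEFINE(example_ops,
                 .dump  = (void *)example_dump,
                 .name  = "example");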
Signed-off-by: Tejun Heo --- tools/sched_ext/include/scx/common.bpf.h | 83 +++++++++++++- tools/sched_ext/include/scx/common.h | 6 + tools/sched_ext/include/scx/compat.h | 1 + .../sched_ext/include/scx/enums.autogen.bpf.h | 105 ++++++++++++++++++ tools/sched_ext/include/scx/enums.autogen.h | 41 +++++++ tools/sched_ext/include/scx/enums.bpf.h | 12 ++ tools/sched_ext/include/scx/enums.h | 27 +++++ tools/sched_ext/include/scx/user_exit_info.h | 9 +- tools/sched_ext/scx_central.bpf.c | 2 +- tools/sched_ext/scx_central.c | 1 + tools/sched_ext/scx_flatcg.bpf.c | 2 +- tools/sched_ext/scx_flatcg.c | 1 + tools/sched_ext/scx_qmap.bpf.c | 2 +- tools/sched_ext/scx_qmap.c | 2 + 14 files changed, 286 insertions(+), 8 deletions(-) create mode 100644 tools/sched_ext/include/scx/enums.autogen.bpf.h create mode 100644 tools/sched_ext/include/scx/enums.autogen.h create mode 100644 tools/sched_ext/include/scx/enums.bpf.h create mode 100644 tools/sched_ext/include/scx/enums.h diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h index 625f5b046776..858ba1f438f6 100644 --- a/tools/sched_ext/include/scx/common.bpf.h +++ b/tools/sched_ext/include/scx/common.bpf.h @@ -9,7 +9,7 @@ #ifdef LSP #define __bpf__ -#include "../vmlinux/vmlinux.h" +#include "../vmlinux.h" #else #include "vmlinux.h" #endif @@ -24,6 +24,10 @@ #define PF_EXITING 0x00000004 #define CLOCK_MONOTONIC 1 +extern int LINUX_KERNEL_VERSION __kconfig; +extern const char CONFIG_CC_VERSION_TEXT[64] __kconfig __weak; +extern const char CONFIG_LOCALVERSION[64] __kconfig __weak; + /* * Earlier versions of clang/pahole lost upper 32bits in 64bit enums which can * lead to really confusing misbehaviors. Let's trigger a build failure. @@ -98,7 +102,7 @@ void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {} _Pragma("GCC diagnostic push") \ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ ___bpf_fill(___param, args); \ - _Pragma("GCC diagnostic pop") \ + _Pragma("GCC diagnostic pop") /* * scx_bpf_exit() wraps the scx_bpf_exit_bstr() kfunc with variadic arguments @@ -136,6 +140,20 @@ void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {} ___scx_bpf_bstr_format_checker(fmt, ##args); \ }) +/* + * scx_bpf_dump_header() is a wrapper around scx_bpf_dump that adds a header + * of system information for debugging. + */ +#define scx_bpf_dump_header() \ +({ \ + scx_bpf_dump("kernel: %d.%d.%d %s\ncc: %s\n", \ + LINUX_KERNEL_VERSION >> 16, \ + LINUX_KERNEL_VERSION >> 8 & 0xFF, \ + LINUX_KERNEL_VERSION & 0xFF, \ + CONFIG_LOCALVERSION, \ + CONFIG_CC_VERSION_TEXT); \ +}) + #define BPF_STRUCT_OPS(name, args...) 
\ SEC("struct_ops/"#name) \ BPF_PROG(name, ##args) @@ -317,6 +335,66 @@ u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, const struct cpumask *src2) __ksym; u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym; +int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) __ksym; +int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym; +void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym; + +#define def_iter_struct(name) \ +struct bpf_iter_##name { \ + struct bpf_iter_bits it; \ + const struct cpumask *bitmap; \ +}; + +#define def_iter_new(name) \ +static inline int bpf_iter_##name##_new( \ + struct bpf_iter_##name *it, const u64 *unsafe_ptr__ign, u32 nr_words) \ +{ \ + it->bitmap = scx_bpf_get_##name##_cpumask(); \ + return bpf_iter_bits_new(&it->it, (const u64 *)it->bitmap, \ + sizeof(struct cpumask) / 8); \ +} + +#define def_iter_next(name) \ +static inline int *bpf_iter_##name##_next(struct bpf_iter_##name *it) { \ + return bpf_iter_bits_next(&it->it); \ +} + +#define def_iter_destroy(name) \ +static inline void bpf_iter_##name##_destroy(struct bpf_iter_##name *it) { \ + scx_bpf_put_cpumask(it->bitmap); \ + bpf_iter_bits_destroy(&it->it); \ +} +#define def_for_each_cpu(cpu, name) for_each_##name##_cpu(cpu) + +/// Provides iterator for possible and online cpus. +/// +/// # Example +/// +/// ``` +/// static inline void example_use() { +/// int *cpu; +/// +/// for_each_possible_cpu(cpu){ +/// bpf_printk("CPU %d is possible", *cpu); +/// } +/// +/// for_each_online_cpu(cpu){ +/// bpf_printk("CPU %d is online", *cpu); +/// } +/// } +/// ``` +def_iter_struct(possible); +def_iter_new(possible); +def_iter_next(possible); +def_iter_destroy(possible); +#define for_each_possible_cpu(cpu) bpf_for_each(possible, cpu, NULL, 0) + +def_iter_struct(online); +def_iter_new(online); +def_iter_next(online); +def_iter_destroy(online); +#define for_each_online_cpu(cpu) bpf_for_each(online, cpu, NULL, 0) + /* * Access a cpumask in read-only mode (typically to check bits). */ @@ -423,5 +501,6 @@ static inline u32 log2_u64(u64 v) } #include "compat.bpf.h" +#include "enums.bpf.h" #endif /* __SCX_COMMON_BPF_H */ diff --git a/tools/sched_ext/include/scx/common.h b/tools/sched_ext/include/scx/common.h index 5b0f90152152..dc18b99e55cd 100644 --- a/tools/sched_ext/include/scx/common.h +++ b/tools/sched_ext/include/scx/common.h @@ -71,5 +71,11 @@ typedef int64_t s64; #include "user_exit_info.h" #include "compat.h" +#include "enums.h" + +/* not available when building kernel tools/sched_ext */ +#if __has_include() +#include +#endif #endif /* __SCHED_EXT_COMMON_H */ diff --git a/tools/sched_ext/include/scx/compat.h b/tools/sched_ext/include/scx/compat.h index cc56ff9aa252..b50280e2ba2b 100644 --- a/tools/sched_ext/include/scx/compat.h +++ b/tools/sched_ext/include/scx/compat.h @@ -149,6 +149,7 @@ static inline long scx_hotplug_seq(void) __skel = __scx_name##__open(); \ SCX_BUG_ON(!__skel, "Could not open " #__scx_name); \ __skel->struct_ops.__ops_name->hotplug_seq = scx_hotplug_seq(); \ + SCX_ENUM_INIT(__skel); \ __skel; \ }) diff --git a/tools/sched_ext/include/scx/enums.autogen.bpf.h b/tools/sched_ext/include/scx/enums.autogen.bpf.h new file mode 100644 index 000000000000..0e941a0d6f88 --- /dev/null +++ b/tools/sched_ext/include/scx/enums.autogen.bpf.h @@ -0,0 +1,105 @@ +/* + * WARNING: This file is autogenerated from scripts/gen_enums.py. 
If you would + * like to access an enum that is currently missing, add it to the script + * and run it from the root directory to update this file. + */ + +const volatile u64 __SCX_OPS_NAME_LEN __weak; +#define SCX_OPS_NAME_LEN __SCX_OPS_NAME_LEN + +const volatile u64 __SCX_SLICE_DFL __weak; +#define SCX_SLICE_DFL __SCX_SLICE_DFL + +const volatile u64 __SCX_SLICE_INF __weak; +#define SCX_SLICE_INF __SCX_SLICE_INF + +const volatile u64 __SCX_DSQ_FLAG_BUILTIN __weak; +#define SCX_DSQ_FLAG_BUILTIN __SCX_DSQ_FLAG_BUILTIN + +const volatile u64 __SCX_DSQ_FLAG_LOCAL_ON __weak; +#define SCX_DSQ_FLAG_LOCAL_ON __SCX_DSQ_FLAG_LOCAL_ON + +const volatile u64 __SCX_DSQ_INVALID __weak; +#define SCX_DSQ_INVALID __SCX_DSQ_INVALID + +const volatile u64 __SCX_DSQ_GLOBAL __weak; +#define SCX_DSQ_GLOBAL __SCX_DSQ_GLOBAL + +const volatile u64 __SCX_DSQ_LOCAL __weak; +#define SCX_DSQ_LOCAL __SCX_DSQ_LOCAL + +const volatile u64 __SCX_DSQ_LOCAL_ON __weak; +#define SCX_DSQ_LOCAL_ON __SCX_DSQ_LOCAL_ON + +const volatile u64 __SCX_DSQ_LOCAL_CPU_MASK __weak; +#define SCX_DSQ_LOCAL_CPU_MASK __SCX_DSQ_LOCAL_CPU_MASK + +const volatile u64 __SCX_TASK_QUEUED __weak; +#define SCX_TASK_QUEUED __SCX_TASK_QUEUED + +const volatile u64 __SCX_TASK_RESET_RUNNABLE_AT __weak; +#define SCX_TASK_RESET_RUNNABLE_AT __SCX_TASK_RESET_RUNNABLE_AT + +const volatile u64 __SCX_TASK_DEQD_FOR_SLEEP __weak; +#define SCX_TASK_DEQD_FOR_SLEEP __SCX_TASK_DEQD_FOR_SLEEP + +const volatile u64 __SCX_TASK_STATE_SHIFT __weak; +#define SCX_TASK_STATE_SHIFT __SCX_TASK_STATE_SHIFT + +const volatile u64 __SCX_TASK_STATE_BITS __weak; +#define SCX_TASK_STATE_BITS __SCX_TASK_STATE_BITS + +const volatile u64 __SCX_TASK_STATE_MASK __weak; +#define SCX_TASK_STATE_MASK __SCX_TASK_STATE_MASK + +const volatile u64 __SCX_TASK_CURSOR __weak; +#define SCX_TASK_CURSOR __SCX_TASK_CURSOR + +const volatile u64 __SCX_TASK_NONE __weak; +#define SCX_TASK_NONE __SCX_TASK_NONE + +const volatile u64 __SCX_TASK_INIT __weak; +#define SCX_TASK_INIT __SCX_TASK_INIT + +const volatile u64 __SCX_TASK_READY __weak; +#define SCX_TASK_READY __SCX_TASK_READY + +const volatile u64 __SCX_TASK_ENABLED __weak; +#define SCX_TASK_ENABLED __SCX_TASK_ENABLED + +const volatile u64 __SCX_TASK_NR_STATES __weak; +#define SCX_TASK_NR_STATES __SCX_TASK_NR_STATES + +const volatile u64 __SCX_TASK_DSQ_ON_PRIQ __weak; +#define SCX_TASK_DSQ_ON_PRIQ __SCX_TASK_DSQ_ON_PRIQ + +const volatile u64 __SCX_KICK_IDLE __weak; +#define SCX_KICK_IDLE __SCX_KICK_IDLE + +const volatile u64 __SCX_KICK_PREEMPT __weak; +#define SCX_KICK_PREEMPT __SCX_KICK_PREEMPT + +const volatile u64 __SCX_KICK_WAIT __weak; +#define SCX_KICK_WAIT __SCX_KICK_WAIT + +const volatile u64 __SCX_ENQ_WAKEUP __weak; +#define SCX_ENQ_WAKEUP __SCX_ENQ_WAKEUP + +const volatile u64 __SCX_ENQ_HEAD __weak; +#define SCX_ENQ_HEAD __SCX_ENQ_HEAD + +const volatile u64 __SCX_ENQ_PREEMPT __weak; +#define SCX_ENQ_PREEMPT __SCX_ENQ_PREEMPT + +const volatile u64 __SCX_ENQ_REENQ __weak; +#define SCX_ENQ_REENQ __SCX_ENQ_REENQ + +const volatile u64 __SCX_ENQ_LAST __weak; +#define SCX_ENQ_LAST __SCX_ENQ_LAST + +const volatile u64 __SCX_ENQ_CLEAR_OPSS __weak; +#define SCX_ENQ_CLEAR_OPSS __SCX_ENQ_CLEAR_OPSS + +const volatile u64 __SCX_ENQ_DSQ_PRIQ __weak; +#define SCX_ENQ_DSQ_PRIQ __SCX_ENQ_DSQ_PRIQ + diff --git a/tools/sched_ext/include/scx/enums.autogen.h b/tools/sched_ext/include/scx/enums.autogen.h new file mode 100644 index 000000000000..88137a140e72 --- /dev/null +++ b/tools/sched_ext/include/scx/enums.autogen.h @@ -0,0 +1,41 @@ +/* + * WARNING: This file is 
autogenerated from scripts/gen_enums.py. If you would + * like to access an enum that is currently missing, add it to the script + * and run it from the root directory to update this file. + */ + +#define SCX_ENUM_INIT(skel) do { \ + SCX_ENUM_SET(skel, scx_public_consts, SCX_OPS_NAME_LEN); \ + SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_DFL); \ + SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_INF); \ + SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_BUILTIN); \ + SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_LOCAL_ON); \ + SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_INVALID); \ + SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_GLOBAL); \ + SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_LOCAL); \ + SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_LOCAL_ON); \ + SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_LOCAL_CPU_MASK); \ + SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_QUEUED); \ + SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_RESET_RUNNABLE_AT); \ + SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_DEQD_FOR_SLEEP); \ + SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_STATE_SHIFT); \ + SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_STATE_BITS); \ + SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_STATE_MASK); \ + SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_CURSOR); \ + SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_NONE); \ + SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_INIT); \ + SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_READY); \ + SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_ENABLED); \ + SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_NR_STATES); \ + SCX_ENUM_SET(skel, scx_ent_dsq_flags, SCX_TASK_DSQ_ON_PRIQ); \ + SCX_ENUM_SET(skel, scx_kick_flags, SCX_KICK_IDLE); \ + SCX_ENUM_SET(skel, scx_kick_flags, SCX_KICK_PREEMPT); \ + SCX_ENUM_SET(skel, scx_kick_flags, SCX_KICK_WAIT); \ + SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_WAKEUP); \ + SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_HEAD); \ + SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_PREEMPT); \ + SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_REENQ); \ + SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_LAST); \ + SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_CLEAR_OPSS); \ + SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_DSQ_PRIQ); \ +} while (0) diff --git a/tools/sched_ext/include/scx/enums.bpf.h b/tools/sched_ext/include/scx/enums.bpf.h new file mode 100644 index 000000000000..af704c5d6334 --- /dev/null +++ b/tools/sched_ext/include/scx/enums.bpf.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Convenience macros for getting/setting struct scx_enums instances. + * + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. + */ +#ifndef __SCX_ENUMS_BPF_H +#define __SCX_ENUMS_BPF_H + +#include "enums.autogen.bpf.h" + +#endif /* __SCX_ENUMS_BPF_H */ diff --git a/tools/sched_ext/include/scx/enums.h b/tools/sched_ext/include/scx/enums.h new file mode 100644 index 000000000000..34cbebe974b7 --- /dev/null +++ b/tools/sched_ext/include/scx/enums.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Define struct scx_enums that stores the load-time values of enums + * used by the BPF program. + * + * Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
+ */ + +#ifndef __SCX_ENUMS_H +#define __SCX_ENUMS_H + +static inline void __ENUM_set(u64 *val, char *type, char *name) +{ + bool res; + + res = __COMPAT_read_enum(type, name, val); + SCX_BUG_ON(!res, "enum not found(%s)", name); +} + +#define SCX_ENUM_SET(skel, type, name) do { \ + __ENUM_set(&skel->rodata->__##name, #type, #name); \ + } while (0) + + +#include "enums.autogen.h" + +#endif /* __SCX_ENUMS_H */ diff --git a/tools/sched_ext/include/scx/user_exit_info.h b/tools/sched_ext/include/scx/user_exit_info.h index 8ce2734402e1..66f856640ee7 100644 --- a/tools/sched_ext/include/scx/user_exit_info.h +++ b/tools/sched_ext/include/scx/user_exit_info.h @@ -10,6 +10,11 @@ #ifndef __USER_EXIT_INFO_H #define __USER_EXIT_INFO_H +#ifdef LSP +#define __bpf__ +#include "../vmlinux.h" +#endif + enum uei_sizes { UEI_REASON_LEN = 128, UEI_MSG_LEN = 1024, @@ -25,9 +30,7 @@ struct user_exit_info { #ifdef __bpf__ -#ifdef LSP -#include "../vmlinux/vmlinux.h" -#else +#ifndef LSP #include "vmlinux.h" #endif #include diff --git a/tools/sched_ext/scx_central.bpf.c b/tools/sched_ext/scx_central.bpf.c index e6fad6211f6c..2907df78241e 100644 --- a/tools/sched_ext/scx_central.bpf.c +++ b/tools/sched_ext/scx_central.bpf.c @@ -57,7 +57,7 @@ enum { const volatile s32 central_cpu; const volatile u32 nr_cpu_ids = 1; /* !0 for veristat, set during init */ -const volatile u64 slice_ns = SCX_SLICE_DFL; +const volatile u64 slice_ns; bool timer_pinned = true; u64 nr_total, nr_locals, nr_queued, nr_lost_pids; diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c index e938156ed0a0..1e9f74525d8f 100644 --- a/tools/sched_ext/scx_central.c +++ b/tools/sched_ext/scx_central.c @@ -58,6 +58,7 @@ restart: skel->rodata->central_cpu = 0; skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus(); + skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL"); while ((opt = getopt(argc, argv, "s:c:pvh")) != -1) { switch (opt) { diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c index 4e3afcd260bf..3dbfa82883be 100644 --- a/tools/sched_ext/scx_flatcg.bpf.c +++ b/tools/sched_ext/scx_flatcg.bpf.c @@ -57,7 +57,7 @@ enum { char _license[] SEC("license") = "GPL"; const volatile u32 nr_cpus = 32; /* !0 for veristat, set during init */ -const volatile u64 cgrp_slice_ns = SCX_SLICE_DFL; +const volatile u64 cgrp_slice_ns; const volatile bool fifo_sched; u64 cvtime_now; diff --git a/tools/sched_ext/scx_flatcg.c b/tools/sched_ext/scx_flatcg.c index 5d24ca9c29d9..6dd423eeb4ff 100644 --- a/tools/sched_ext/scx_flatcg.c +++ b/tools/sched_ext/scx_flatcg.c @@ -137,6 +137,7 @@ restart: skel = SCX_OPS_OPEN(flatcg_ops, scx_flatcg); skel->rodata->nr_cpus = libbpf_num_possible_cpus(); + skel->rodata->cgrp_slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL"); while ((opt = getopt(argc, argv, "s:i:dfvh")) != -1) { double v; diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c index ee264947e0c3..3a20bb0c014a 100644 --- a/tools/sched_ext/scx_qmap.bpf.c +++ b/tools/sched_ext/scx_qmap.bpf.c @@ -33,7 +33,7 @@ enum consts { char _license[] SEC("license") = "GPL"; -const volatile u64 slice_ns = SCX_SLICE_DFL; +const volatile u64 slice_ns; const volatile u32 stall_user_nth; const volatile u32 stall_kernel_nth; const volatile u32 dsp_inf_loop_after; diff --git a/tools/sched_ext/scx_qmap.c b/tools/sched_ext/scx_qmap.c index ac45a02b4055..c4912ab2e76f 100644 --- a/tools/sched_ext/scx_qmap.c +++ b/tools/sched_ext/scx_qmap.c @@ -64,6 +64,8 @@ int main(int 
argc, char **argv) skel = SCX_OPS_OPEN(qmap_ops, scx_qmap); + skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL"); + while ((opt = getopt(argc, argv, "s:e:t:T:l:b:PHd:D:Spvh")) != -1) { switch (opt) { case 's': From e197f5ec3ad38ad0a014ed1ba672497bdf0550bb Mon Sep 17 00:00:00 2001 From: Liang Jie Date: Fri, 13 Dec 2024 17:52:54 +0800 Subject: [PATCH 03/18] sched_ext: Use sizeof_field for key_len in dsq_hash_params Update the `dsq_hash_params` initialization to use `sizeof_field` for the `key_len` field instead of a hardcoded value. This improves code readability and ensures the key length dynamically matches the size of the `id` field in the `scx_dispatch_q` structure. Signed-off-by: Liang Jie Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 71342f3719c1..54e659ba9476 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -960,7 +960,7 @@ static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task); static struct scx_dispatch_q **global_dsqs; static const struct rhashtable_params dsq_hash_params = { - .key_len = 8, + .key_len = sizeof_field(struct scx_dispatch_q, id), .key_offset = offsetof(struct scx_dispatch_q, id), .head_offset = offsetof(struct scx_dispatch_q, hash_node), }; From bc3a116a44988103187b192414caddbcb598072d Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 23 Dec 2024 00:31:10 +0100 Subject: [PATCH 04/18] sched_ext: Use str_enabled_disabled() helper in update_selcpu_topology() Remove hard-coded strings by using the str_enabled_disabled() helper function. Signed-off-by: Thorsten Blum Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 54e659ba9476..7b229a4fb083 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -3376,9 +3376,9 @@ static void update_selcpu_topology(void) rcu_read_unlock(); pr_debug("sched_ext: LLC idle selection %s\n", - enable_llc ? "enabled" : "disabled"); + str_enabled_disabled(enable_llc)); pr_debug("sched_ext: NUMA idle selection %s\n", - enable_numa ? "enabled" : "disabled"); + str_enabled_disabled(enable_numa)); if (enable_llc) static_branch_enable_cpuslocked(&scx_selcpu_topo_llc); From 9cf9aceed21e3f08c94108bd688e812effce4423 Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Sat, 28 Dec 2024 11:02:50 +0100 Subject: [PATCH 05/18] sched_ext: idle: use assign_cpu() to update the idle cpumask Use the assign_cpu() helper to set or clear the CPU in the idle mask, based on the idle condition. Acked-by: Yury Norov Signed-off-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 7b229a4fb083..eec4716de225 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -3654,10 +3654,7 @@ void __scx_update_idle(struct rq *rq, bool idle) return; } - if (idle) - cpumask_set_cpu(cpu, idle_masks.cpu); - else - cpumask_clear_cpu(cpu, idle_masks.cpu); + assign_cpu(cpu, idle_masks.cpu, idle); #ifdef CONFIG_SCHED_SMT if (sched_smt_active()) { From 02f034dcbf3dcb0989e638fdc00d10984dc2278b Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Sat, 28 Dec 2024 11:04:11 +0100 Subject: [PATCH 06/18] sched_ext: idle: clarify comments Add a comments to clarify about the usage of cpumask_intersects(). 
Moreover, update scx_select_cpu_dfl() description clarifying that the final step of the idle selection logic involves searching for any idle CPU in the system that the task can use. Reviewed-by: Yury Norov Signed-off-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index eec4716de225..2d701203a3db 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -3180,6 +3180,10 @@ static bool test_and_clear_cpu_idle(int cpu) * scx_pick_idle_cpu() can get caught in an infinite loop as * @cpu is never cleared from idle_masks.smt. Ensure that @cpu * is eventually cleared. + * + * NOTE: Use cpumask_intersects() and cpumask_test_cpu() to + * reduce memory writes, which may help alleviate cache + * coherence pressure. */ if (cpumask_intersects(smt, idle_masks.smt)) cpumask_andnot(idle_masks.smt, idle_masks.smt, smt); @@ -3408,6 +3412,8 @@ static void update_selcpu_topology(void) * 4. Pick a CPU within the same NUMA node, if enabled: * - choose a CPU from the same NUMA node to reduce memory access latency. * + * 5. Pick any idle CPU usable by the task. + * * Step 3 and 4 are performed only if the system has, respectively, multiple * LLC domains / multiple NUMA nodes (see scx_selcpu_topo_llc and * scx_selcpu_topo_numa). From c0cf3530098bc13578bd4d0692351a2c4a57425d Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Sat, 28 Dec 2024 11:06:33 +0100 Subject: [PATCH 07/18] sched_ext: idle: introduce check_builtin_idle_enabled() helper Minor refactoring to add a helper function for checking if the built-in idle CPU selection policy is enabled. Signed-off-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 2d701203a3db..926579624c41 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -6297,6 +6297,15 @@ void __init init_sched_ext_class(void) __bpf_kfunc_start_defs(); +static bool check_builtin_idle_enabled(void) +{ + if (static_branch_likely(&scx_builtin_idle_enabled)) + return true; + + scx_ops_error("built-in idle tracking is disabled"); + return false; +} + /** * scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu() * @p: task_struct to select a CPU for @@ -6314,10 +6323,8 @@ __bpf_kfunc_start_defs(); __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) { - if (!static_branch_likely(&scx_builtin_idle_enabled)) { - scx_ops_error("built-in idle tracking is disabled"); + if (!check_builtin_idle_enabled()) goto prev_cpu; - } if (!scx_kf_allowed(SCX_KF_SELECT_CPU)) goto prev_cpu; @@ -7411,10 +7418,8 @@ __bpf_kfunc void scx_bpf_put_cpumask(const struct cpumask *cpumask) */ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void) { - if (!static_branch_likely(&scx_builtin_idle_enabled)) { - scx_ops_error("built-in idle tracking is disabled"); + if (!check_builtin_idle_enabled()) return cpu_none_mask; - } #ifdef CONFIG_SMP return idle_masks.cpu; @@ -7432,10 +7437,8 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void) */ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void) { - if (!static_branch_likely(&scx_builtin_idle_enabled)) { - scx_ops_error("built-in idle tracking is disabled"); + if (!check_builtin_idle_enabled()) return cpu_none_mask; - } #ifdef CONFIG_SMP if (sched_smt_active()) @@ -7473,10 +7476,8 @@ __bpf_kfunc void 
scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask) */ __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) { - if (!static_branch_likely(&scx_builtin_idle_enabled)) { - scx_ops_error("built-in idle tracking is disabled"); + if (!check_builtin_idle_enabled()) return false; - } if (ops_cpu_valid(cpu, NULL)) return test_and_clear_cpu_idle(cpu); @@ -7506,10 +7507,8 @@ __bpf_kfunc bool scx_bpf_test_and_clear_cpu_idle(s32 cpu) __bpf_kfunc s32 scx_bpf_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags) { - if (!static_branch_likely(&scx_builtin_idle_enabled)) { - scx_ops_error("built-in idle tracking is disabled"); + if (!check_builtin_idle_enabled()) return -EBUSY; - } return scx_pick_idle_cpu(cpus_allowed, flags); } From d9071ecb313940fe1d8827fef48bbf9fdaf28a4c Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Sat, 4 Jan 2025 10:00:09 +0100 Subject: [PATCH 08/18] sched_ext: idle: small CPU iteration refactoring Replace the loop to check if all SMT CPUs are idle with cpumask_subset(). This simplifies the code and slightly improves efficiency, while preserving the original behavior. Note that idle_masks.smt handling remains racy, which is acceptable as it serves as an optimization and is self-correcting. Suggested-and-reviewed-by: Yury Norov Signed-off-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 926579624c41..0ce116e0f67c 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -3671,10 +3671,8 @@ void __scx_update_idle(struct rq *rq, bool idle) * idle_masks.smt handling is racy but that's fine as * it's only for optimization and self-correcting. */ - for_each_cpu(cpu, smt) { - if (!cpumask_test_cpu(cpu, idle_masks.cpu)) - return; - } + if (!cpumask_subset(smt, idle_masks.cpu)) + return; cpumask_or(idle_masks.smt, idle_masks.smt, smt); } else { cpumask_andnot(idle_masks.smt, idle_masks.smt, smt); From e4975ac5353395978a7dc49a656adbe6ef9ad063 Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Mon, 6 Jan 2025 18:03:09 +0100 Subject: [PATCH 09/18] sched_ext: update scx_bpf_dsq_insert() doc for SCX_DSQ_LOCAL_ON With commit 5b26f7b920f7 ("sched_ext: Allow SCX_DSQ_LOCAL_ON for direct dispatches"), scx_bpf_dsq_insert() can use SCX_DSQ_LOCAL_ON for direct dispatch from ops.enqueue() to target the local DSQ of any CPU. Update the documentation accordingly. Fixes: 5b26f7b920f7 ("sched_ext: Allow SCX_DSQ_LOCAL_ON for direct dispatches") Signed-off-by: Andrea Righi Signed-off-by: Tejun Heo --- Documentation/scheduler/sched-ext.rst | 6 +++--- kernel/sched/ext.c | 4 +--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/Documentation/scheduler/sched-ext.rst b/Documentation/scheduler/sched-ext.rst index 6cb8b676ce03..a8ceaad63b6a 100644 --- a/Documentation/scheduler/sched-ext.rst +++ b/Documentation/scheduler/sched-ext.rst @@ -242,9 +242,9 @@ The following briefly shows how a waking task is scheduled and executed. task was inserted directly from ``ops.select_cpu()``). ``ops.enqueue()`` can make one of the following decisions: - * Immediately insert the task into either the global or local DSQ by - calling ``scx_bpf_dsq_insert()`` with ``SCX_DSQ_GLOBAL`` or - ``SCX_DSQ_LOCAL``, respectively. + * Immediately insert the task into either the global or a local DSQ by + calling ``scx_bpf_dsq_insert()`` with one of the following options: + ``SCX_DSQ_GLOBAL``, ``SCX_DSQ_LOCAL``, or ``SCX_DSQ_LOCAL_ON | cpu``. 
* Immediately insert the task into a custom DSQ by calling ``scx_bpf_dsq_insert()`` with a DSQ ID which is smaller than 2^63. diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 0ce116e0f67c..f408aa5d1efc 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -6406,9 +6406,7 @@ __bpf_kfunc_start_defs(); * ops.select_cpu(), and ops.dispatch(). * * When called from ops.select_cpu() or ops.enqueue(), it's for direct dispatch - * and @p must match the task being enqueued. Also, %SCX_DSQ_LOCAL_ON can't be - * used to target the local DSQ of a CPU other than the enqueueing one. Use - * ops.select_cpu() to be on the target CPU in the first place. + * and @p must match the task being enqueued. * * When called from ops.select_cpu(), @enq_flags and @dsp_id are stored, and @p * will be directly inserted into the corresponding dispatch queue after From 382d7efc14a38baa8b8b91115891ef00fe5a5076 Mon Sep 17 00:00:00 2001 From: Andrea Righi Date: Mon, 6 Jan 2025 08:20:34 +0100 Subject: [PATCH 10/18] sched_ext: Include remaining task time slice in error state dump Report the remaining time slice when dumping task information during an error exit. This information can be useful for tracking incorrect or excessively long time slices in schedulers that implement dynamic time slice logic. Signed-off-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index f408aa5d1efc..c9f2fbb477ed 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -5218,9 +5218,9 @@ static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx, scx_get_task_state(p), p->scx.flags & ~SCX_TASK_STATE_MASK, p->scx.dsq_flags, ops_state & SCX_OPSS_STATE_MASK, ops_state >> SCX_OPSS_QSEQ_SHIFT); - dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu", + dump_line(s, " sticky/holding_cpu=%d/%d dsq_id=%s dsq_vtime=%llu slice=%llu", p->scx.sticky_cpu, p->scx.holding_cpu, dsq_id_buf, - p->scx.dsq_vtime); + p->scx.dsq_vtime, p->scx.slice); dump_line(s, " cpus=%*pb", cpumask_pr_args(p->cpus_ptr)); if (SCX_HAS_OP(dump_task)) { From a73bca3d9cc0a27dd7061a55841a1d752ec7365a Mon Sep 17 00:00:00 2001 From: Shizhao Chen Date: Tue, 7 Jan 2025 23:52:19 +0800 Subject: [PATCH 11/18] sched_ext: Add option -l in selftest runner to list all available tests The selftest runner currently allows selecting tests via the -t option. This patch adds a new -l option that lists all available tests, providing users with an overview of the tests they can choose from. This enhancement is especially useful for scripting and automation purposes, making it easier to discover and run tests. 
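For instance (illustrative invocations only, run from the selftests build directory; <string> is a placeholder, not a real test name):

  $ ./runner -l              # print the name of every registered test
  $ ./runner -t <string>     # then run only the tests whose name contains <string>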
Signed-off-by: Shizhao Chen Signed-off-by: Tejun Heo --- tools/testing/selftests/sched_ext/runner.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/sched_ext/runner.c b/tools/testing/selftests/sched_ext/runner.c index eab48c7ff309..aa2d7d32dda9 100644 --- a/tools/testing/selftests/sched_ext/runner.c +++ b/tools/testing/selftests/sched_ext/runner.c @@ -22,11 +22,12 @@ const char help_fmt[] = "\n" " -t TEST Only run tests whose name includes this string\n" " -s Include print output for skipped tests\n" +" -l List all available tests\n" " -q Don't print the test descriptions during run\n" " -h Display this help and exit\n"; static volatile int exit_req; -static bool quiet, print_skipped; +static bool quiet, print_skipped, list; #define MAX_SCX_TESTS 2048 @@ -133,7 +134,7 @@ int main(int argc, char **argv) libbpf_set_strict_mode(LIBBPF_STRICT_ALL); - while ((opt = getopt(argc, argv, "qst:h")) != -1) { + while ((opt = getopt(argc, argv, "qslt:h")) != -1) { switch (opt) { case 'q': quiet = true; @@ -141,6 +142,9 @@ int main(int argc, char **argv) case 's': print_skipped = true; break; + case 'l': + list = true; + break; case 't': filter = optarg; break; @@ -154,6 +158,13 @@ int main(int argc, char **argv) enum scx_test_status status; struct scx_test *test = &__scx_tests[i]; + if (list) { + printf("%s\n", test->name); + if (i == (__scx_num_tests - 1)) + return 0; + continue; + } + if (filter && should_skip_test(test, filter)) { /* * Printing the skipped tests and their preambles can From ea9b2626271f4e82f329ba52503dd5193fe656e9 Mon Sep 17 00:00:00 2001 From: Changwoo Min Date: Thu, 9 Jan 2025 22:14:51 +0900 Subject: [PATCH 12/18] sched_ext: Relocate scx_enabled() related code scx_enabled() will be used in scx_rq_clock_update/invalidate() in the following patch, so relocate the scx_enabled() related code to the proper location. Signed-off-by: Changwoo Min Acked-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/sched.h | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 76f5f53a645f..440ecedf871b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1717,6 +1717,19 @@ struct rq_flags { extern struct balance_callback balance_push_callback; +#ifdef CONFIG_SCHED_CLASS_EXT +extern const struct sched_class ext_sched_class; + +DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); /* SCX BPF scheduler loaded */ +DECLARE_STATIC_KEY_FALSE(__scx_switched_all); /* all fair class tasks on SCX */ + +#define scx_enabled() static_branch_unlikely(&__scx_ops_enabled) +#define scx_switched_all() static_branch_unlikely(&__scx_switched_all) +#else /* !CONFIG_SCHED_CLASS_EXT */ +#define scx_enabled() false +#define scx_switched_all() false +#endif /* !CONFIG_SCHED_CLASS_EXT */ + /* * Lockdep annotation that avoids accidental unlocks; it's like a * sticky/continuous lockdep_assert_held(). 
@@ -2505,19 +2518,6 @@ extern const struct sched_class rt_sched_class; extern const struct sched_class fair_sched_class; extern const struct sched_class idle_sched_class; -#ifdef CONFIG_SCHED_CLASS_EXT -extern const struct sched_class ext_sched_class; - -DECLARE_STATIC_KEY_FALSE(__scx_ops_enabled); /* SCX BPF scheduler loaded */ -DECLARE_STATIC_KEY_FALSE(__scx_switched_all); /* all fair class tasks on SCX */ - -#define scx_enabled() static_branch_unlikely(&__scx_ops_enabled) -#define scx_switched_all() static_branch_unlikely(&__scx_switched_all) -#else /* !CONFIG_SCHED_CLASS_EXT */ -#define scx_enabled() false -#define scx_switched_all() false -#endif /* !CONFIG_SCHED_CLASS_EXT */ - /* * Iterate only active classes. SCX can take over all fair tasks or be * completely disabled. If the former, skip fair. If the latter, skip SCX. From 3a9910b5904d29c566e3ff9290990b519827ba75 Mon Sep 17 00:00:00 2001 From: Changwoo Min Date: Thu, 9 Jan 2025 22:14:52 +0900 Subject: [PATCH 13/18] sched_ext: Implement scx_bpf_now() Returns a high-performance monotonically non-decreasing clock for the current CPU. The clock returned is in nanoseconds. It provides the following properties: 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently to account for execution time and track tasks' runtime properties. Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which eventually reads a hardware timestamp counter -- is neither performant nor scalable. scx_bpf_now() aims to provide a high-performance clock by using the rq clock in the scheduler core whenever possible. 2) High enough resolution for the BPF scheduler use cases: In most BPF scheduler use cases, the required clock resolution is lower than the most accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically uses the rq clock in the scheduler core whenever it is valid. It considers that the rq clock is valid from the time the rq clock is updated (update_rq_clock) until the rq is unlocked (rq_unpin_lock). 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now() guarantees the clock never goes backward when comparing them in the same CPU. On the other hand, when comparing clocks in different CPUs, there is no such guarantee -- the clock can go backward. It provides a monotonically *non-decreasing* clock so that it would provide the same clock values in two different scx_bpf_now() calls in the same CPU during the same period of when the rq clock is valid. An rq clock becomes valid when it is updated using update_rq_clock() and invalidated when the rq is unlocked using rq_unpin_lock(). Let's suppose the following timeline in the scheduler core: T1. rq_lock(rq) T2. update_rq_clock(rq) T3. a sched_ext BPF operation T4. rq_unlock(rq) T5. a sched_ext BPF operation T6. rq_lock(rq) T7. update_rq_clock(rq) For [T2, T4), we consider that rq clock is valid (SCX_RQ_CLK_VALID is set), so scx_bpf_now() calls during [T2, T4) (including T3) will return the rq clock updated at T2. For duration [T4, T7), when a BPF scheduler can still call scx_bpf_now() (T5), we consider the rq clock is invalid (SCX_RQ_CLK_VALID is unset at T4). So when calling scx_bpf_now() at T5, we will return a fresh clock value by calling sched_clock_cpu() internally. Also, to prevent getting outdated rq clocks from a previous scx scheduler, invalidate all the rq clocks when unloading a BPF scheduler. One example of calling scx_bpf_now(), when the rq clock is invalid (like T5), is in scx_central [1]. 
The scx_central scheduler uses a BPF timer for preemptive scheduling. In every msec, the timer callback checks if the currently running tasks exceed their timeslice. At the beginning of the BPF timer callback (central_timerfn in scx_central.bpf.c), scx_central gets the current time. When the BPF timer callback runs, the rq clock could be invalid, the same as T5. In this case, scx_bpf_now() returns a fresh clock value rather than returning the old one (T2). [1] https://github.com/sched-ext/scx/blob/main/scheds/c/scx_central.bpf.c Signed-off-by: Changwoo Min Acked-by: Peter Zijlstra (Intel) Acked-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/core.c | 6 +++- kernel/sched/ext.c | 74 +++++++++++++++++++++++++++++++++++++++++++- kernel/sched/sched.h | 25 +++++++++++++-- 3 files changed, 101 insertions(+), 4 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 95e40895a519..ab8015c8cab4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -789,6 +789,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) void update_rq_clock(struct rq *rq) { s64 delta; + u64 clock; lockdep_assert_rq_held(rq); @@ -800,11 +801,14 @@ void update_rq_clock(struct rq *rq) SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED); rq->clock_update_flags |= RQCF_UPDATED; #endif + clock = sched_clock_cpu(cpu_of(rq)); + scx_rq_clock_update(rq, clock); - delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; + delta = clock - rq->clock; if (delta < 0) return; rq->clock += delta; + update_rq_clock_task(rq, delta); } diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index c9f2fbb477ed..573711a92009 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -4911,7 +4911,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work) struct task_struct *p; struct rhashtable_iter rht_iter; struct scx_dispatch_q *dsq; - int i, kind; + int i, kind, cpu; kind = atomic_read(&scx_exit_kind); while (true) { @@ -4994,6 +4994,15 @@ static void scx_ops_disable_workfn(struct kthread_work *work) scx_task_iter_stop(&sti); percpu_up_write(&scx_fork_rwsem); + /* + * Invalidate all the rq clocks to prevent getting outdated + * rq clocks from a previous scx scheduler. + */ + for_each_possible_cpu(cpu) { + struct rq *rq = cpu_rq(cpu); + scx_rq_clock_invalidate(rq); + } + /* no task is on scx, turn off all the switches and flush in-progress calls */ static_branch_disable(&__scx_ops_enabled); for (i = SCX_OPI_BEGIN; i < SCX_OPI_END; i++) @@ -7599,6 +7608,68 @@ out: } #endif +/** + * scx_bpf_now - Returns a high-performance monotonically non-decreasing + * clock for the current CPU. The clock returned is in nanoseconds. + * + * It provides the following properties: + * + * 1) High performance: Many BPF schedulers call bpf_ktime_get_ns() frequently + * to account for execution time and track tasks' runtime properties. + * Unfortunately, in some hardware platforms, bpf_ktime_get_ns() -- which + * eventually reads a hardware timestamp counter -- is neither performant nor + * scalable. scx_bpf_now() aims to provide a high-performance clock by + * using the rq clock in the scheduler core whenever possible. + * + * 2) High enough resolution for the BPF scheduler use cases: In most BPF + * scheduler use cases, the required clock resolution is lower than the most + * accurate hardware clock (e.g., rdtsc in x86). scx_bpf_now() basically + * uses the rq clock in the scheduler core whenever it is valid. 
It considers + * that the rq clock is valid from the time the rq clock is updated + * (update_rq_clock) until the rq is unlocked (rq_unpin_lock). + * + * 3) Monotonically non-decreasing clock for the same CPU: scx_bpf_now() + * guarantees the clock never goes backward when comparing them in the same + * CPU. On the other hand, when comparing clocks in different CPUs, there + * is no such guarantee -- the clock can go backward. It provides a + * monotonically *non-decreasing* clock so that it would provide the same + * clock values in two different scx_bpf_now() calls in the same CPU + * during the same period of when the rq clock is valid. + */ +__bpf_kfunc u64 scx_bpf_now(void) +{ + struct rq *rq; + u64 clock; + + preempt_disable(); + + rq = this_rq(); + if (smp_load_acquire(&rq->scx.flags) & SCX_RQ_CLK_VALID) { + /* + * If the rq clock is valid, use the cached rq clock. + * + * Note that scx_bpf_now() is re-entrant between a process + * context and an interrupt context (e.g., timer interrupt). + * However, we don't need to consider the race between them + * because such race is not observable from a caller. + */ + clock = READ_ONCE(rq->scx.clock); + } else { + /* + * Otherwise, return a fresh rq clock. + * + * The rq clock is updated outside of the rq lock. + * In this case, keep the updated rq clock invalid so the next + * kfunc call outside the rq lock gets a fresh rq clock. + */ + clock = sched_clock_cpu(cpu_of(rq)); + } + + preempt_enable(); + + return clock; +} + __bpf_kfunc_end_defs(); BTF_KFUNCS_START(scx_kfunc_ids_any) @@ -7630,6 +7701,7 @@ BTF_ID_FLAGS(func, scx_bpf_cpu_rq) #ifdef CONFIG_CGROUP_SCHED BTF_ID_FLAGS(func, scx_bpf_task_cgroup, KF_RCU | KF_ACQUIRE) #endif +BTF_ID_FLAGS(func, scx_bpf_now) BTF_KFUNCS_END(scx_kfunc_ids_any) static const struct btf_kfunc_id_set scx_kfunc_set_any = { diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 440ecedf871b..a97f96a06615 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -754,6 +754,7 @@ enum scx_rq_flags { SCX_RQ_BAL_PENDING = 1 << 2, /* balance hasn't run yet */ SCX_RQ_BAL_KEEP = 1 << 3, /* balance decided to keep current */ SCX_RQ_BYPASSING = 1 << 4, + SCX_RQ_CLK_VALID = 1 << 5, /* RQ clock is fresh and valid */ SCX_RQ_IN_WAKEUP = 1 << 16, SCX_RQ_IN_BALANCE = 1 << 17, @@ -766,9 +767,10 @@ struct scx_rq { unsigned long ops_qseq; u64 extra_enq_flags; /* see move_task_to_local_dsq() */ u32 nr_running; - u32 flags; u32 cpuperf_target; /* [0, SCHED_CAPACITY_SCALE] */ bool cpu_released; + u32 flags; + u64 clock; /* current per-rq clock -- see scx_bpf_now() */ cpumask_var_t cpus_to_kick; cpumask_var_t cpus_to_kick_if_idle; cpumask_var_t cpus_to_preempt; @@ -1725,9 +1727,28 @@ DECLARE_STATIC_KEY_FALSE(__scx_switched_all); /* all fair class tasks on SCX */ #define scx_enabled() static_branch_unlikely(&__scx_ops_enabled) #define scx_switched_all() static_branch_unlikely(&__scx_switched_all) + +static inline void scx_rq_clock_update(struct rq *rq, u64 clock) +{ + if (!scx_enabled()) + return; + WRITE_ONCE(rq->scx.clock, clock); + smp_store_release(&rq->scx.flags, rq->scx.flags | SCX_RQ_CLK_VALID); +} + +static inline void scx_rq_clock_invalidate(struct rq *rq) +{ + if (!scx_enabled()) + return; + WRITE_ONCE(rq->scx.flags, rq->scx.flags & ~SCX_RQ_CLK_VALID); +} + #else /* !CONFIG_SCHED_CLASS_EXT */ #define scx_enabled() false #define scx_switched_all() false + +static inline void scx_rq_clock_update(struct rq *rq, u64 clock) {} +static inline void scx_rq_clock_invalidate(struct rq *rq) {} #endif /* 
!CONFIG_SCHED_CLASS_EXT */ /* @@ -1759,7 +1780,7 @@ static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf) if (rq->clock_update_flags > RQCF_ACT_SKIP) rf->clock_update_flags = RQCF_UPDATED; #endif - + scx_rq_clock_invalidate(rq); lockdep_unpin_lock(__rq_lockp(rq), rf->cookie); } From 2e1ce39fde7caacc98bc0472d15e8c641dfb31bf Mon Sep 17 00:00:00 2001 From: Changwoo Min Date: Thu, 9 Jan 2025 22:14:53 +0900 Subject: [PATCH 14/18] sched_ext: Add scx_bpf_now() for BPF scheduler scx_bpf_now() is added to the header files so the BPF scheduler can use it. Signed-off-by: Changwoo Min Acked-by: Andrea Righi Signed-off-by: Tejun Heo --- tools/sched_ext/include/scx/common.bpf.h | 1 + tools/sched_ext/include/scx/compat.bpf.h | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h index 858ba1f438f6..5c9517190713 100644 --- a/tools/sched_ext/include/scx/common.bpf.h +++ b/tools/sched_ext/include/scx/common.bpf.h @@ -76,6 +76,7 @@ bool scx_bpf_task_running(const struct task_struct *p) __ksym; s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym; struct rq *scx_bpf_cpu_rq(s32 cpu) __ksym; struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym __weak; +u64 scx_bpf_now(void) __ksym __weak; /* * Use the following as @it__iter when calling scx_bpf_dsq_move[_vtime]() from diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h index d56520100a26..50e1499ae093 100644 --- a/tools/sched_ext/include/scx/compat.bpf.h +++ b/tools/sched_ext/include/scx/compat.bpf.h @@ -125,6 +125,11 @@ bool scx_bpf_dispatch_vtime_from_dsq___compat(struct bpf_iter_scx_dsq *it__iter, false; \ }) +#define scx_bpf_now() \ + (bpf_ksym_exists(scx_bpf_now) ? \ + scx_bpf_now() : \ + bpf_ktime_get_ns()) + /* * Define sched_ext_ops. This may be expanded to define multiple variants for * backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH(). From d07be814fc7165cb804317c99228243382e81188 Mon Sep 17 00:00:00 2001 From: Changwoo Min Date: Thu, 9 Jan 2025 22:14:54 +0900 Subject: [PATCH 15/18] sched_ext: Add time helpers for BPF schedulers The following functions are added for BPF schedulers: - time_delta(after, before) - time_after(a, b) - time_before(a, b) - time_after_eq(a, b) - time_before_eq(a, b) - time_in_range(a, b, c) - time_in_range_open(a, b, c) Signed-off-by: Changwoo Min Acked-by: Andrea Righi Signed-off-by: Tejun Heo --- tools/sched_ext/include/scx/common.bpf.h | 94 ++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h index 5c9517190713..f3e15e9efa76 100644 --- a/tools/sched_ext/include/scx/common.bpf.h +++ b/tools/sched_ext/include/scx/common.bpf.h @@ -408,6 +408,100 @@ static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask) void bpf_rcu_read_lock(void) __ksym; void bpf_rcu_read_unlock(void) __ksym; +/* + * Time helpers, most of which are from jiffies.h. + */ + +/** + * time_delta - Calculate the delta between new and old time stamp + * @after: first comparable as u64 + * @before: second comparable as u64 + * + * Return: the time difference, which is >= 0 + */ +static inline s64 time_delta(u64 after, u64 before) +{ + return (s64)(after - before) > 0 ? : 0; +} + +/** + * time_after - returns true if the time a is after time b. 
+ * @a: first comparable as u64 + * @b: second comparable as u64 + * + * Do this with "<0" and ">=0" to only test the sign of the result. A + * good compiler would generate better code (and a really good compiler + * wouldn't care). Gcc is currently neither. + * + * Return: %true is time a is after time b, otherwise %false. + */ +static inline bool time_after(u64 a, u64 b) +{ + return (s64)(b - a) < 0; +} + +/** + * time_before - returns true if the time a is before time b. + * @a: first comparable as u64 + * @b: second comparable as u64 + * + * Return: %true is time a is before time b, otherwise %false. + */ +static inline bool time_before(u64 a, u64 b) +{ + return time_after(b, a); +} + +/** + * time_after_eq - returns true if the time a is after or the same as time b. + * @a: first comparable as u64 + * @b: second comparable as u64 + * + * Return: %true is time a is after or the same as time b, otherwise %false. + */ +static inline bool time_after_eq(u64 a, u64 b) +{ + return (s64)(a - b) >= 0; +} + +/** + * time_before_eq - returns true if the time a is before or the same as time b. + * @a: first comparable as u64 + * @b: second comparable as u64 + * + * Return: %true is time a is before or the same as time b, otherwise %false. + */ +static inline bool time_before_eq(u64 a, u64 b) +{ + return time_after_eq(b, a); +} + +/** + * time_in_range - Calculate whether a is in the range of [b, c]. + * @a: time to test + * @b: beginning of the range + * @c: end of the range + * + * Return: %true is time a is in the range [b, c], otherwise %false. + */ +static inline bool time_in_range(u64 a, u64 b, u64 c) +{ + return time_after_eq(a, b) && time_before_eq(a, c); +} + +/** + * time_in_range_open - Calculate whether a is in the range of [b, c). + * @a: time to test + * @b: beginning of the range + * @c: end of the range + * + * Return: %true is time a is in the range [b, c), otherwise %false. + */ +static inline bool time_in_range_open(u64 a, u64 b, u64 c) +{ + return time_after_eq(a, b) && time_before(a, c); +} + /* * Other helpers From 0f130bc341d09a82ad23e7fc59d4306528c4c4ce Mon Sep 17 00:00:00 2001 From: Changwoo Min Date: Thu, 9 Jan 2025 22:14:55 +0900 Subject: [PATCH 16/18] sched_ext: Replace bpf_ktime_get_ns() to scx_bpf_now() In the BPF schedulers that use bpf_ktime_get_ns() -- scx_central and scx_flatcg, replace bpf_ktime_get_ns() calls to scx_bpf_now(). 
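As a hedged sketch of the pattern being converted (editorial example, not taken from either scheduler; the "example_*" names and the single global timestamp are made up -- real schedulers keep per-CPU or per-task state), scx_bpf_now() is a drop-in replacement and, through the wrapper added to compat.bpf.h, falls back to bpf_ktime_get_ns() on kernels that lack the kfunc:

  #include <scx/common.bpf.h>

  char _license[] SEC("license") = "GPL";

  static u64 started_at;

  void BPF_STRUCT_OPS(example_running, struct task_struct *p)
  {
          started_at = scx_bpf_now() ?: 1;   /* 0 means idle, as in scx_central */
  }

  void BPF_STRUCT_OPS(example_stopping, struct task_struct *p, bool runnable)
  {
          if (started_at)
                  bpf_printk("ran for %llu ns", scx_bpf_now() - started_at);
          started_at = 0;
  }

  SCX_OPS_DEFINE(example_ops,
                 .running   = (void *)example_running,
                 .stopping  = (void *)example_stopping,
                 .name      = "example");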
Signed-off-by: Changwoo Min Acked-by: Andrea Righi Signed-off-by: Tejun Heo --- tools/sched_ext/scx_central.bpf.c | 4 ++-- tools/sched_ext/scx_flatcg.bpf.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/sched_ext/scx_central.bpf.c b/tools/sched_ext/scx_central.bpf.c index 2907df78241e..4239034ad593 100644 --- a/tools/sched_ext/scx_central.bpf.c +++ b/tools/sched_ext/scx_central.bpf.c @@ -245,7 +245,7 @@ void BPF_STRUCT_OPS(central_running, struct task_struct *p) s32 cpu = scx_bpf_task_cpu(p); u64 *started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids); if (started_at) - *started_at = bpf_ktime_get_ns() ?: 1; /* 0 indicates idle */ + *started_at = scx_bpf_now() ?: 1; /* 0 indicates idle */ } void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable) @@ -258,7 +258,7 @@ void BPF_STRUCT_OPS(central_stopping, struct task_struct *p, bool runnable) static int central_timerfn(void *map, int *key, struct bpf_timer *timer) { - u64 now = bpf_ktime_get_ns(); + u64 now = scx_bpf_now(); u64 nr_to_kick = nr_queued; s32 i, curr_cpu; diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c index 3dbfa82883be..5f588963fb2f 100644 --- a/tools/sched_ext/scx_flatcg.bpf.c +++ b/tools/sched_ext/scx_flatcg.bpf.c @@ -734,7 +734,7 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev) struct fcg_cpu_ctx *cpuc; struct fcg_cgrp_ctx *cgc; struct cgroup *cgrp; - u64 now = bpf_ktime_get_ns(); + u64 now = scx_bpf_now(); bool picked_next = false; cpuc = find_cpu_ctx(); From 62addc6dbf3644272c064c16076221bf4f633f25 Mon Sep 17 00:00:00 2001 From: Changwoo Min Date: Thu, 9 Jan 2025 22:14:56 +0900 Subject: [PATCH 17/18] sched_ext: Use time helpers in BPF schedulers Modify the BPF schedulers to use time helpers defined in common.bpf.h Signed-off-by: Changwoo Min Acked-by: Andrea Righi Signed-off-by: Tejun Heo --- tools/sched_ext/scx_central.bpf.c | 7 +------ tools/sched_ext/scx_flatcg.bpf.c | 21 ++++++++------------- tools/sched_ext/scx_simple.bpf.c | 9 ++------- 3 files changed, 11 insertions(+), 26 deletions(-) diff --git a/tools/sched_ext/scx_central.bpf.c b/tools/sched_ext/scx_central.bpf.c index 4239034ad593..50bc1737c167 100644 --- a/tools/sched_ext/scx_central.bpf.c +++ b/tools/sched_ext/scx_central.bpf.c @@ -87,11 +87,6 @@ struct { __type(value, struct central_timer); } central_timer SEC(".maps"); -static bool vtime_before(u64 a, u64 b) -{ - return (s64)(a - b) < 0; -} - s32 BPF_STRUCT_OPS(central_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags) { @@ -279,7 +274,7 @@ static int central_timerfn(void *map, int *key, struct bpf_timer *timer) /* kick iff the current one exhausted its slice */ started_at = ARRAY_ELEM_PTR(cpu_started_at, cpu, nr_cpu_ids); if (started_at && *started_at && - vtime_before(now, *started_at + slice_ns)) + time_before(now, *started_at + slice_ns)) continue; /* and there's something pending */ diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c index 5f588963fb2f..2c720e3ecad5 100644 --- a/tools/sched_ext/scx_flatcg.bpf.c +++ b/tools/sched_ext/scx_flatcg.bpf.c @@ -137,11 +137,6 @@ static u64 div_round_up(u64 dividend, u64 divisor) return (dividend + divisor - 1) / divisor; } -static bool vtime_before(u64 a, u64 b) -{ - return (s64)(a - b) < 0; -} - static bool cgv_node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b) { struct cgv_node *cgc_a, *cgc_b; @@ -271,7 +266,7 @@ static void cgrp_cap_budget(struct cgv_node *cgv_node, struct fcg_cgrp_ctx *cgc) */ 
max_budget = (cgrp_slice_ns * nr_cpus * cgc->hweight) / (2 * FCG_HWEIGHT_ONE); - if (vtime_before(cvtime, cvtime_now - max_budget)) + if (time_before(cvtime, cvtime_now - max_budget)) cvtime = cvtime_now - max_budget; cgv_node->cvtime = cvtime; @@ -401,7 +396,7 @@ void BPF_STRUCT_OPS(fcg_enqueue, struct task_struct *p, u64 enq_flags) * Limit the amount of budget that an idling task can accumulate * to one slice. */ - if (vtime_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL)) + if (time_before(tvtime, cgc->tvtime_now - SCX_SLICE_DFL)) tvtime = cgc->tvtime_now - SCX_SLICE_DFL; scx_bpf_dsq_insert_vtime(p, cgrp->kn->id, SCX_SLICE_DFL, @@ -535,7 +530,7 @@ void BPF_STRUCT_OPS(fcg_running, struct task_struct *p) * from multiple CPUs and thus racy. Any error should be * contained and temporary. Let's just live with it. */ - if (vtime_before(cgc->tvtime_now, p->scx.dsq_vtime)) + if (time_before(cgc->tvtime_now, p->scx.dsq_vtime)) cgc->tvtime_now = p->scx.dsq_vtime; } bpf_cgroup_release(cgrp); @@ -645,7 +640,7 @@ static bool try_pick_next_cgroup(u64 *cgidp) cgv_node = container_of(rb_node, struct cgv_node, rb_node); cgid = cgv_node->cgid; - if (vtime_before(cvtime_now, cgv_node->cvtime)) + if (time_before(cvtime_now, cgv_node->cvtime)) cvtime_now = cgv_node->cvtime; /* @@ -744,7 +739,7 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev) if (!cpuc->cur_cgid) goto pick_next_cgroup; - if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) { + if (time_before(now, cpuc->cur_at + cgrp_slice_ns)) { if (scx_bpf_dsq_move_to_local(cpuc->cur_cgid)) { stat_inc(FCG_STAT_CNS_KEEP); return; @@ -920,14 +915,14 @@ void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p, struct cgroup *from, struct cgroup *to) { struct fcg_cgrp_ctx *from_cgc, *to_cgc; - s64 vtime_delta; + s64 delta; /* find_cgrp_ctx() triggers scx_ops_error() on lookup failures */ if (!(from_cgc = find_cgrp_ctx(from)) || !(to_cgc = find_cgrp_ctx(to))) return; - vtime_delta = p->scx.dsq_vtime - from_cgc->tvtime_now; - p->scx.dsq_vtime = to_cgc->tvtime_now + vtime_delta; + delta = time_delta(p->scx.dsq_vtime, from_cgc->tvtime_now); + p->scx.dsq_vtime = to_cgc->tvtime_now + delta; } s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init) diff --git a/tools/sched_ext/scx_simple.bpf.c b/tools/sched_ext/scx_simple.bpf.c index 31f915b286c6..e6de99dba7db 100644 --- a/tools/sched_ext/scx_simple.bpf.c +++ b/tools/sched_ext/scx_simple.bpf.c @@ -52,11 +52,6 @@ static void stat_inc(u32 idx) (*cnt_p)++; } -static inline bool vtime_before(u64 a, u64 b) -{ - return (s64)(a - b) < 0; -} - s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p, s32 prev_cpu, u64 wake_flags) { bool is_idle = false; @@ -84,7 +79,7 @@ void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags) * Limit the amount of budget that an idling task can accumulate * to one slice. */ - if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL)) + if (time_before(vtime, vtime_now - SCX_SLICE_DFL)) vtime = vtime_now - SCX_SLICE_DFL; scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime, @@ -108,7 +103,7 @@ void BPF_STRUCT_OPS(simple_running, struct task_struct *p) * thus racy. Any error should be contained and temporary. Let's just * live with it. 
*/ - if (vtime_before(vtime_now, p->scx.dsq_vtime)) + if (time_before(vtime_now, p->scx.dsq_vtime)) vtime_now = p->scx.dsq_vtime; } From 987ce79b5242c048acae3a0c1feaae0a353d5cde Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 10 Jan 2025 22:31:36 -0800 Subject: [PATCH 18/18] sched_ext: fix kernel-doc warnings Use the correct function parameter names and function names. Use the correct kernel-doc comment format for struct sched_ext_ops to eliminate a bunch of warnings. ext.c:1418: warning: Excess function parameter 'include_dead' description in 'scx_task_iter_next_locked' ext.c:7261: warning: expecting prototype for scx_bpf_dump(). Prototype was for scx_bpf_dump_bstr() instead ext.c:7352: warning: Excess function parameter 'flags' description in 'scx_bpf_cpuperf_set' ext.c:3150: warning: Function parameter or struct member 'in_fi' not described in 'scx_prio_less' ext.c:4711: warning: Function parameter or struct member 'dur_s' not described in 'scx_softlockup' ext.c:4775: warning: Function parameter or struct member 'bypass' not described in 'scx_ops_bypass' ext.c:7453: warning: Function parameter or struct member 'idle_mask' not described in 'scx_bpf_put_idle_cpumask' ext.c:209: warning: Incorrect use of kernel-doc format: * select_cpu - Pick the target CPU for a task which is being woken up ext.c:236: warning: Incorrect use of kernel-doc format: * enqueue - Enqueue a task on the BPF scheduler ext.c:251: warning: Incorrect use of kernel-doc format: * dequeue - Remove a task from the BPF scheduler ext.c:267: warning: Incorrect use of kernel-doc format: * dispatch - Dispatch tasks from the BPF scheduler and/or user DSQs ext.c:290: warning: Incorrect use of kernel-doc format: * tick - Periodic tick ext.c:300: warning: Incorrect use of kernel-doc format: * runnable - A task is becoming runnable on its associated CPU ext.c:327: warning: Incorrect use of kernel-doc format: * running - A task is starting to run on its associated CPU ext.c:335: warning: Incorrect use of kernel-doc format: * stopping - A task is stopping execution ext.c:346: warning: Incorrect use of kernel-doc format: * quiescent - A task is becoming not runnable on its associated CPU ext.c:366: warning: Incorrect use of kernel-doc format: * yield - Yield CPU ext.c:381: warning: Incorrect use of kernel-doc format: * core_sched_before - Task ordering for core-sched ext.c:399: warning: Incorrect use of kernel-doc format: * set_weight - Set task weight ext.c:408: warning: Incorrect use of kernel-doc format: * set_cpumask - Set CPU affinity ext.c:418: warning: Incorrect use of kernel-doc format: * update_idle - Update the idle state of a CPU ext.c:439: warning: Incorrect use of kernel-doc format: * cpu_acquire - A CPU is becoming available to the BPF scheduler ext.c:449: warning: Incorrect use of kernel-doc format: * cpu_release - A CPU is taken away from the BPF scheduler ext.c:461: warning: Incorrect use of kernel-doc format: * init_task - Initialize a task to run in a BPF scheduler ext.c:476: warning: Incorrect use of kernel-doc format: * exit_task - Exit a previously-running task from the system ext.c:485: warning: Incorrect use of kernel-doc format: * enable - Enable BPF scheduling for a task ext.c:494: warning: Incorrect use of kernel-doc format: * disable - Disable BPF scheduling for a task ext.c:504: warning: Incorrect use of kernel-doc format: * dump - Dump BPF scheduler state on error ext.c:512: warning: Incorrect use of kernel-doc format: * dump_cpu - Dump BPF scheduler state for a CPU on error ext.c:524: warning: 
Incorrect use of kernel-doc format: * dump_task - Dump BPF scheduler state for a runnable task on error ext.c:535: warning: Incorrect use of kernel-doc format: * cgroup_init - Initialize a cgroup ext.c:550: warning: Incorrect use of kernel-doc format: * cgroup_exit - Exit a cgroup ext.c:559: warning: Incorrect use of kernel-doc format: * cgroup_prep_move - Prepare a task to be moved to a different cgroup ext.c:574: warning: Incorrect use of kernel-doc format: * cgroup_move - Commit cgroup move ext.c:585: warning: Incorrect use of kernel-doc format: * cgroup_cancel_move - Cancel cgroup move ext.c:597: warning: Incorrect use of kernel-doc format: * cgroup_set_weight - A cgroup's weight is being changed ext.c:611: warning: Incorrect use of kernel-doc format: * cpu_online - A CPU became online ext.c:620: warning: Incorrect use of kernel-doc format: * cpu_offline - A CPU is going offline ext.c:633: warning: Incorrect use of kernel-doc format: * init - Initialize the BPF scheduler ext.c:638: warning: Incorrect use of kernel-doc format: * exit - Clean up after the BPF scheduler ext.c:648: warning: Incorrect use of kernel-doc format: * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch ext.c:653: warning: Incorrect use of kernel-doc format: * flags - %SCX_OPS_* flags ext.c:658: warning: Incorrect use of kernel-doc format: * timeout_ms - The maximum amount of time, in milliseconds, that a ext.c:667: warning: Incorrect use of kernel-doc format: * exit_dump_len - scx_exit_info.dump buffer length. If 0, the default ext.c:673: warning: Incorrect use of kernel-doc format: * hotplug_seq - A sequence number that may be set by the scheduler to ext.c:682: warning: Incorrect use of kernel-doc format: * name - BPF scheduler's name ext.c:689: warning: Function parameter or struct member 'select_cpu' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'enqueue' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'dequeue' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'dispatch' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'tick' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'runnable' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'running' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'stopping' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'quiescent' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'yield' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'core_sched_before' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'set_weight' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'set_cpumask' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'update_idle' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'cpu_acquire' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'cpu_release' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'init_task' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'exit_task' not described 
in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'enable' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'disable' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'dump' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'dump_cpu' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'dump_task' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'cgroup_init' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'cgroup_exit' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'cgroup_prep_move' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'cgroup_move' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'cgroup_cancel_move' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'cgroup_set_weight' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'cpu_online' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'cpu_offline' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'init' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'exit' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'dispatch_max_batch' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'flags' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'timeout_ms' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'exit_dump_len' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'hotplug_seq' not described in 'sched_ext_ops' ext.c:689: warning: Function parameter or struct member 'name' not described in 'sched_ext_ops' Signed-off-by: Randy Dunlap Cc: Tejun Heo Cc: David Vernet Cc: Changwoo Min Cc: Ingo Molnar Cc: Peter Zijlstra Cc: bpf@vger.kernel.org Acked-by: Andrea Righi Signed-off-by: Tejun Heo --- kernel/sched/ext.c | 87 ++++++++++++++++++++++++---------------------- 1 file changed, 45 insertions(+), 42 deletions(-) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 573711a92009..64967d28da7f 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -206,7 +206,7 @@ struct scx_dump_ctx { */ struct sched_ext_ops { /** - * select_cpu - Pick the target CPU for a task which is being woken up + * @select_cpu: Pick the target CPU for a task which is being woken up * @p: task being woken up * @prev_cpu: the cpu @p was on before sleeping * @wake_flags: SCX_WAKE_* @@ -233,7 +233,7 @@ struct sched_ext_ops { s32 (*select_cpu)(struct task_struct *p, s32 prev_cpu, u64 wake_flags); /** - * enqueue - Enqueue a task on the BPF scheduler + * @enqueue: Enqueue a task on the BPF scheduler * @p: task being enqueued * @enq_flags: %SCX_ENQ_* * @@ -248,7 +248,7 @@ struct sched_ext_ops { void (*enqueue)(struct task_struct *p, u64 enq_flags); /** - * dequeue - Remove a task from the BPF scheduler + * @dequeue: Remove a task from the BPF scheduler * @p: task being dequeued * @deq_flags: %SCX_DEQ_* * @@ -264,7 +264,7 @@ struct sched_ext_ops { void (*dequeue)(struct task_struct *p, u64 deq_flags); 
/** - * dispatch - Dispatch tasks from the BPF scheduler and/or user DSQs + * @dispatch: Dispatch tasks from the BPF scheduler and/or user DSQs * @cpu: CPU to dispatch tasks for * @prev: previous task being switched out * @@ -287,7 +287,7 @@ struct sched_ext_ops { void (*dispatch)(s32 cpu, struct task_struct *prev); /** - * tick - Periodic tick + * @tick: Periodic tick * @p: task running currently * * This operation is called every 1/HZ seconds on CPUs which are @@ -297,7 +297,7 @@ struct sched_ext_ops { void (*tick)(struct task_struct *p); /** - * runnable - A task is becoming runnable on its associated CPU + * @runnable: A task is becoming runnable on its associated CPU * @p: task becoming runnable * @enq_flags: %SCX_ENQ_* * @@ -324,7 +324,7 @@ struct sched_ext_ops { void (*runnable)(struct task_struct *p, u64 enq_flags); /** - * running - A task is starting to run on its associated CPU + * @running: A task is starting to run on its associated CPU * @p: task starting to run * * See ->runnable() for explanation on the task state notifiers. @@ -332,7 +332,7 @@ struct sched_ext_ops { void (*running)(struct task_struct *p); /** - * stopping - A task is stopping execution + * @stopping: A task is stopping execution * @p: task stopping to run * @runnable: is task @p still runnable? * @@ -343,7 +343,7 @@ struct sched_ext_ops { void (*stopping)(struct task_struct *p, bool runnable); /** - * quiescent - A task is becoming not runnable on its associated CPU + * @quiescent: A task is becoming not runnable on its associated CPU * @p: task becoming not runnable * @deq_flags: %SCX_DEQ_* * @@ -363,7 +363,7 @@ struct sched_ext_ops { void (*quiescent)(struct task_struct *p, u64 deq_flags); /** - * yield - Yield CPU + * @yield: Yield CPU * @from: yielding task * @to: optional yield target task * @@ -378,7 +378,7 @@ struct sched_ext_ops { bool (*yield)(struct task_struct *from, struct task_struct *to); /** - * core_sched_before - Task ordering for core-sched + * @core_sched_before: Task ordering for core-sched * @a: task A * @b: task B * @@ -396,7 +396,7 @@ struct sched_ext_ops { bool (*core_sched_before)(struct task_struct *a, struct task_struct *b); /** - * set_weight - Set task weight + * @set_weight: Set task weight * @p: task to set weight for * @weight: new weight [1..10000] * @@ -405,7 +405,7 @@ struct sched_ext_ops { void (*set_weight)(struct task_struct *p, u32 weight); /** - * set_cpumask - Set CPU affinity + * @set_cpumask: Set CPU affinity * @p: task to set CPU affinity for * @cpumask: cpumask of cpus that @p can run on * @@ -415,7 +415,7 @@ struct sched_ext_ops { const struct cpumask *cpumask); /** - * update_idle - Update the idle state of a CPU + * @update_idle: Update the idle state of a CPU * @cpu: CPU to udpate the idle state for * @idle: whether entering or exiting the idle state * @@ -436,7 +436,7 @@ struct sched_ext_ops { void (*update_idle)(s32 cpu, bool idle); /** - * cpu_acquire - A CPU is becoming available to the BPF scheduler + * @cpu_acquire: A CPU is becoming available to the BPF scheduler * @cpu: The CPU being acquired by the BPF scheduler. * @args: Acquire arguments, see the struct definition. * @@ -446,7 +446,7 @@ struct sched_ext_ops { void (*cpu_acquire)(s32 cpu, struct scx_cpu_acquire_args *args); /** - * cpu_release - A CPU is taken away from the BPF scheduler + * @cpu_release: A CPU is taken away from the BPF scheduler * @cpu: The CPU being released by the BPF scheduler. * @args: Release arguments, see the struct definition. 
* @@ -458,7 +458,7 @@ struct sched_ext_ops { void (*cpu_release)(s32 cpu, struct scx_cpu_release_args *args); /** - * init_task - Initialize a task to run in a BPF scheduler + * @init_task: Initialize a task to run in a BPF scheduler * @p: task to initialize for BPF scheduling * @args: init arguments, see the struct definition * @@ -473,8 +473,9 @@ struct sched_ext_ops { s32 (*init_task)(struct task_struct *p, struct scx_init_task_args *args); /** - * exit_task - Exit a previously-running task from the system + * @exit_task: Exit a previously-running task from the system * @p: task to exit + * @args: exit arguments, see the struct definition * * @p is exiting or the BPF scheduler is being unloaded. Perform any * necessary cleanup for @p. @@ -482,7 +483,7 @@ struct sched_ext_ops { void (*exit_task)(struct task_struct *p, struct scx_exit_task_args *args); /** - * enable - Enable BPF scheduling for a task + * @enable: Enable BPF scheduling for a task * @p: task to enable BPF scheduling for * * Enable @p for BPF scheduling. enable() is called on @p any time it @@ -491,7 +492,7 @@ struct sched_ext_ops { void (*enable)(struct task_struct *p); /** - * disable - Disable BPF scheduling for a task + * @disable: Disable BPF scheduling for a task * @p: task to disable BPF scheduling for * * @p is exiting, leaving SCX or the BPF scheduler is being unloaded. @@ -501,7 +502,7 @@ struct sched_ext_ops { void (*disable)(struct task_struct *p); /** - * dump - Dump BPF scheduler state on error + * @dump: Dump BPF scheduler state on error * @ctx: debug dump context * * Use scx_bpf_dump() to generate BPF scheduler specific debug dump. @@ -509,7 +510,7 @@ struct sched_ext_ops { void (*dump)(struct scx_dump_ctx *ctx); /** - * dump_cpu - Dump BPF scheduler state for a CPU on error + * @dump_cpu: Dump BPF scheduler state for a CPU on error * @ctx: debug dump context * @cpu: CPU to generate debug dump for * @idle: @cpu is currently idle without any runnable tasks @@ -521,7 +522,7 @@ struct sched_ext_ops { void (*dump_cpu)(struct scx_dump_ctx *ctx, s32 cpu, bool idle); /** - * dump_task - Dump BPF scheduler state for a runnable task on error + * @dump_task: Dump BPF scheduler state for a runnable task on error * @ctx: debug dump context * @p: runnable task to generate debug dump for * @@ -532,7 +533,7 @@ struct sched_ext_ops { #ifdef CONFIG_EXT_GROUP_SCHED /** - * cgroup_init - Initialize a cgroup + * @cgroup_init: Initialize a cgroup * @cgrp: cgroup being initialized * @args: init arguments, see the struct definition * @@ -547,7 +548,7 @@ struct sched_ext_ops { struct scx_cgroup_init_args *args); /** - * cgroup_exit - Exit a cgroup + * @cgroup_exit: Exit a cgroup * @cgrp: cgroup being exited * * Either the BPF scheduler is being unloaded or @cgrp destroyed, exit @@ -556,7 +557,7 @@ struct sched_ext_ops { void (*cgroup_exit)(struct cgroup *cgrp); /** - * cgroup_prep_move - Prepare a task to be moved to a different cgroup + * @cgroup_prep_move: Prepare a task to be moved to a different cgroup * @p: task being moved * @from: cgroup @p is being moved from * @to: cgroup @p is being moved to @@ -571,7 +572,7 @@ struct sched_ext_ops { struct cgroup *from, struct cgroup *to); /** - * cgroup_move - Commit cgroup move + * @cgroup_move: Commit cgroup move * @p: task being moved * @from: cgroup @p is being moved from * @to: cgroup @p is being moved to @@ -582,7 +583,7 @@ struct sched_ext_ops { struct cgroup *from, struct cgroup *to); /** - * cgroup_cancel_move - Cancel cgroup move + * @cgroup_cancel_move: Cancel cgroup move * 
@p: task whose cgroup move is being canceled * @from: cgroup @p was being moved from * @to: cgroup @p was being moved to @@ -594,7 +595,7 @@ struct sched_ext_ops { struct cgroup *from, struct cgroup *to); /** - * cgroup_set_weight - A cgroup's weight is being changed + * @cgroup_set_weight: A cgroup's weight is being changed * @cgrp: cgroup whose weight is being updated * @weight: new weight [1..10000] * @@ -608,7 +609,7 @@ struct sched_ext_ops { */ /** - * cpu_online - A CPU became online + * @cpu_online: A CPU became online * @cpu: CPU which just came up * * @cpu just came online. @cpu will not call ops.enqueue() or @@ -617,7 +618,7 @@ struct sched_ext_ops { void (*cpu_online)(s32 cpu); /** - * cpu_offline - A CPU is going offline + * @cpu_offline: A CPU is going offline * @cpu: CPU which is going offline * * @cpu is going offline. @cpu will not call ops.enqueue() or @@ -630,12 +631,12 @@ struct sched_ext_ops { */ /** - * init - Initialize the BPF scheduler + * @init: Initialize the BPF scheduler */ s32 (*init)(void); /** - * exit - Clean up after the BPF scheduler + * @exit: Clean up after the BPF scheduler * @info: Exit info * * ops.exit() is also called on ops.init() failure, which is a bit @@ -645,17 +646,17 @@ struct sched_ext_ops { void (*exit)(struct scx_exit_info *info); /** - * dispatch_max_batch - Max nr of tasks that dispatch() can dispatch + * @dispatch_max_batch: Max nr of tasks that dispatch() can dispatch */ u32 dispatch_max_batch; /** - * flags - %SCX_OPS_* flags + * @flags: %SCX_OPS_* flags */ u64 flags; /** - * timeout_ms - The maximum amount of time, in milliseconds, that a + * @timeout_ms: The maximum amount of time, in milliseconds, that a * runnable task should be able to wait before being scheduled. The * maximum timeout may not exceed the default timeout of 30 seconds. * @@ -664,13 +665,13 @@ struct sched_ext_ops { u32 timeout_ms; /** - * exit_dump_len - scx_exit_info.dump buffer length. If 0, the default + * @exit_dump_len: scx_exit_info.dump buffer length. If 0, the default * value of 32768 is used. */ u32 exit_dump_len; /** - * hotplug_seq - A sequence number that may be set by the scheduler to + * @hotplug_seq: A sequence number that may be set by the scheduler to * detect when a hotplug event has occurred during the loading process. * If 0, no detection occurs. Otherwise, the scheduler will fail to * load if the sequence number does not match @scx_hotplug_seq on the @@ -679,7 +680,7 @@ struct sched_ext_ops { u64 hotplug_seq; /** - * name - BPF scheduler's name + * @name: BPF scheduler's name * * Must be a non-zero valid BPF object name including only isalnum(), * '_' and '.' chars. Shows up in kernel.sched_ext_ops sysctl while the @@ -1408,7 +1409,6 @@ static struct task_struct *scx_task_iter_next(struct scx_task_iter *iter) /** * scx_task_iter_next_locked - Next non-idle task with its rq locked * @iter: iterator to walk - * @include_dead: Whether we should include dead tasks in the iteration * * Visit the non-idle task with its rq lock held. Allows callers to specify * whether they would like to filter out dead tasks. See scx_task_iter_start() @@ -3132,6 +3132,7 @@ static struct task_struct *pick_task_scx(struct rq *rq) * scx_prio_less - Task ordering for core-sched * @a: task A * @b: task B + * @in_fi: in forced idle state * * Core-sched is implemented as an additional scheduling layer on top of the * usual sched_class'es and needs to find out the expected task ordering. 
For @@ -4700,6 +4701,7 @@ bool task_should_scx(int policy) /** * scx_softlockup - sched_ext softlockup handler + * @dur_s: number of seconds of CPU stuck due to soft lockup * * On some multi-socket setups (e.g. 2x Intel 8480c), the BPF scheduler can * live-lock the system by making many CPUs target the same DSQ to the point @@ -4743,6 +4745,7 @@ static void scx_clear_softlockup(void) /** * scx_ops_bypass - [Un]bypass scx_ops and guarantee forward progress + * @bypass: true for bypass, false for unbypass * * Bypassing guarantees that all runnable tasks make forward progress without * trusting the BPF scheduler. We can't grab any mutexes or rwsems as they might @@ -7254,7 +7257,7 @@ __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data, } /** - * scx_bpf_dump - Generate extra debug dump specific to the BPF scheduler + * scx_bpf_dump_bstr - Generate extra debug dump specific to the BPF scheduler * @fmt: format string * @data: format string parameters packaged using ___bpf_fill() macro * @data__sz: @data len, must end in '__sz' for the verifier @@ -7346,7 +7349,6 @@ __bpf_kfunc u32 scx_bpf_cpuperf_cur(s32 cpu) * scx_bpf_cpuperf_set - Set the relative performance target of a CPU * @cpu: CPU of interest * @perf: target performance level [0, %SCX_CPUPERF_ONE] - * @flags: %SCX_CPUPERF_* flags * * Set the target performance level of @cpu to @perf. @perf is in linear * relative scale between 0 and %SCX_CPUPERF_ONE. This determines how the @@ -7458,6 +7460,7 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void) /** * scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to * either the percpu, or SMT idle-tracking cpumask. + * @idle_mask: &cpumask to use */ __bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask) {
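As a closing note on the kernel-doc conversion above: inline struct-member comments must begin with "@member:" for scripts/kernel-doc to parse them cleanly, which is exactly the change applied throughout struct sched_ext_ops. A minimal illustration follows; struct demo_ops and its member are invented for the example and do not appear in the series:

    /**
     * struct demo_ops - example ops table (illustrative only)
     */
    struct demo_ops {
            /**
             * @poll: Check for pending work
             * @arg: opaque cookie handed back to the caller
             *
             * Starting the inline comment with "@poll:" avoids the
             * "Incorrect use of kernel-doc format" warnings listed above;
             * the older "poll - Check for pending work" spelling does not.
             */
            int (*poll)(void *arg);
    };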