mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-29 01:02:08 +00:00

commit f44b7127cb
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext.git
MAINTAINERS:

@@ -20985,6 +20985,8 @@ F:	kernel/sched/
 SCHEDULER - SCHED_EXT
 R:	Tejun Heo <tj@kernel.org>
 R:	David Vernet <void@manifault.com>
+R:	Andrea Righi <arighi@nvidia.com>
+R:	Changwoo Min <changwoo@igalia.com>
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 W:	https://github.com/sched-ext/scx
kernel/sched/ext.c:

@@ -960,7 +960,7 @@ static DEFINE_PER_CPU(struct task_struct *, direct_dispatch_task);
 static struct scx_dispatch_q **global_dsqs;
 
 static const struct rhashtable_params dsq_hash_params = {
-	.key_len	= 8,
+	.key_len	= sizeof_field(struct scx_dispatch_q, id),
 	.key_offset	= offsetof(struct scx_dispatch_q, id),
 	.head_offset	= offsetof(struct scx_dispatch_q, hash_node),
 };
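Note: sizeof_field() yields the size of a struct member without needing an
instance, so .key_len can no longer drift out of sync with the type of the
id field. From memory it is defined in include/linux/stddef.h roughly as:

	#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

Since scx_dispatch_q.id is a u64, this still evaluates to 8.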
@@ -3215,6 +3215,74 @@ static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
 	goto retry;
 }
 
+/*
+ * Return the number of CPUs in the same LLC domain as @cpu (or zero if the
+ * LLC domain is not defined).
+ */
+static unsigned int llc_weight(s32 cpu)
+{
+	struct sched_domain *sd;
+
+	sd = rcu_dereference(per_cpu(sd_llc, cpu));
+	if (!sd)
+		return 0;
+
+	return sd->span_weight;
+}
+
+/*
+ * Return the cpumask representing the LLC domain of @cpu (or NULL if the LLC
+ * domain is not defined).
+ */
+static struct cpumask *llc_span(s32 cpu)
+{
+	struct sched_domain *sd;
+
+	sd = rcu_dereference(per_cpu(sd_llc, cpu));
+	if (!sd)
+		return NULL;
+
+	return sched_domain_span(sd);
+}
+
+/*
+ * Return the number of CPUs in the same NUMA domain as @cpu (or zero if the
+ * NUMA domain is not defined).
+ */
+static unsigned int numa_weight(s32 cpu)
+{
+	struct sched_domain *sd;
+	struct sched_group *sg;
+
+	sd = rcu_dereference(per_cpu(sd_numa, cpu));
+	if (!sd)
+		return 0;
+	sg = sd->groups;
+	if (!sg)
+		return 0;
+
+	return sg->group_weight;
+}
+
+/*
+ * Return the cpumask representing the NUMA domain of @cpu (or NULL if the
+ * NUMA domain is not defined).
+ */
+static struct cpumask *numa_span(s32 cpu)
+{
+	struct sched_domain *sd;
+	struct sched_group *sg;
+
+	sd = rcu_dereference(per_cpu(sd_numa, cpu));
+	if (!sd)
+		return NULL;
+	sg = sd->groups;
+	if (!sg)
+		return NULL;
+
+	return sched_group_span(sg);
+}
+
 /*
  * Return true if the LLC domains do not perfectly overlap with the NUMA
  * domains, false otherwise.
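Note: the four helpers above dereference RCU-protected per-CPU sched_domain
pointers, so they must run inside an RCU read-side critical section. A
minimal sketch of a hypothetical caller (not part of this patch):

	static bool llc_covers_all_online_cpus(s32 cpu)
	{
		bool ret;

		rcu_read_lock();
		/* zero weight means no LLC domain is defined for @cpu */
		ret = llc_weight(cpu) == num_online_cpus();
		rcu_read_unlock();

		return ret;
	}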
@@ -3246,19 +3314,10 @@ static bool llc_numa_mismatch(void)
 	 * overlapping, which is incorrect (as NUMA 1 has two distinct LLC
 	 * domains).
 	 */
-	for_each_online_cpu(cpu) {
-		const struct cpumask *numa_cpus;
-		struct sched_domain *sd;
-
-		sd = rcu_dereference(per_cpu(sd_llc, cpu));
-		if (!sd)
-			return true;
-
-		numa_cpus = cpumask_of_node(cpu_to_node(cpu));
-		if (sd->span_weight != cpumask_weight(numa_cpus))
-			return true;
-	}
+	for_each_online_cpu(cpu)
+		if (llc_weight(cpu) != numa_weight(cpu))
+			return true;
 
 	return false;
 }
@@ -3276,8 +3335,7 @@ static bool llc_numa_mismatch(void)
 static void update_selcpu_topology(void)
 {
 	bool enable_llc = false, enable_numa = false;
-	struct sched_domain *sd;
-	const struct cpumask *cpus;
+	unsigned int nr_cpus;
 	s32 cpu = cpumask_first(cpu_online_mask);
 
 	/*
@@ -3291,10 +3349,12 @@ static void update_selcpu_topology(void)
 	 * CPUs.
 	 */
 	rcu_read_lock();
-	sd = rcu_dereference(per_cpu(sd_llc, cpu));
-	if (sd) {
-		if (sd->span_weight < num_online_cpus())
+	nr_cpus = llc_weight(cpu);
+	if (nr_cpus > 0) {
+		if (nr_cpus < num_online_cpus())
 			enable_llc = true;
+		pr_debug("sched_ext: LLC=%*pb weight=%u\n",
+			 cpumask_pr_args(llc_span(cpu)), llc_weight(cpu));
 	}
 
 	/*
@@ -3306,9 +3366,13 @@ static void update_selcpu_topology(void)
 	 * enabling both NUMA and LLC optimizations is unnecessary, as checking
 	 * for an idle CPU in the same domain twice is redundant.
 	 */
-	cpus = cpumask_of_node(cpu_to_node(cpu));
-	if ((cpumask_weight(cpus) < num_online_cpus()) && llc_numa_mismatch())
-		enable_numa = true;
+	nr_cpus = numa_weight(cpu);
+	if (nr_cpus > 0) {
+		if (nr_cpus < num_online_cpus() && llc_numa_mismatch())
+			enable_numa = true;
+		pr_debug("sched_ext: NUMA=%*pb weight=%u\n",
+			 cpumask_pr_args(numa_span(cpu)), numa_weight(cpu));
+	}
 	rcu_read_unlock();
 
 	pr_debug("sched_ext: LLC idle selection %s\n",
@@ -3360,7 +3424,6 @@ static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
 
 	*found = false;
 
-
 	/*
 	 * This is necessary to protect llc_cpus.
 	 */
@@ -3379,15 +3442,10 @@ static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
 	 */
 	if (p->nr_cpus_allowed >= num_possible_cpus()) {
 		if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
-			numa_cpus = cpumask_of_node(cpu_to_node(prev_cpu));
+			numa_cpus = numa_span(prev_cpu);
 
-		if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) {
-			struct sched_domain *sd;
-
-			sd = rcu_dereference(per_cpu(sd_llc, prev_cpu));
-			if (sd)
-				llc_cpus = sched_domain_span(sd);
-		}
+		if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc))
+			llc_cpus = llc_span(prev_cpu);
 	}
 
 	/*
@@ -4763,7 +4821,7 @@ static void scx_ops_bypass(bool bypass)
 		 * sees scx_rq_bypassing() before moving tasks to SCX.
 		 */
 		if (!scx_enabled()) {
-			rq_unlock_irqrestore(rq, &rf);
+			rq_unlock(rq, &rf);
 			continue;
 		}
tools/sched_ext/include/scx/common.bpf.h:

@@ -9,7 +9,7 @@
 
 #ifdef LSP
 #define __bpf__
-#include "../vmlinux/vmlinux.h"
+#include "../vmlinux.h"
 #else
 #include "vmlinux.h"
 #endif
@@ -24,6 +24,10 @@
 #define PF_EXITING			0x00000004
 #define CLOCK_MONOTONIC			1
 
+extern int LINUX_KERNEL_VERSION __kconfig;
+extern const char CONFIG_CC_VERSION_TEXT[64] __kconfig __weak;
+extern const char CONFIG_LOCALVERSION[64] __kconfig __weak;
+
 /*
  * Earlier versions of clang/pahole lost upper 32bits in 64bit enums which can
  * lead to really confusing misbehaviors. Let's trigger a build failure.
@@ -40,9 +44,9 @@ void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_fl
 void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
 u32 scx_bpf_dispatch_nr_slots(void) __ksym;
 void scx_bpf_dispatch_cancel(void) __ksym;
-bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym;
-void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym;
-void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym;
+bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym __weak;
+void scx_bpf_dsq_move_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
+void scx_bpf_dsq_move_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
 bool scx_bpf_dsq_move(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
 bool scx_bpf_dsq_move_vtime(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
 u32 scx_bpf_reenqueue_local(void) __ksym;
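Note: marking these kfunc declarations __ksym __weak lets a BPF scheduler
load on kernels that do not export them, and probe availability at runtime
instead of failing verification. A hedged sketch using libbpf's
bpf_ksym_exists() (example_dispatch is illustrative, and it assumes a weak
declaration of the pre-rename scx_bpf_consume() is still in scope, as the
compat layer provides):

	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
	{
		/* prefer the new kfunc name, fall back to the old one */
		if (bpf_ksym_exists(scx_bpf_dsq_move_to_local))
			scx_bpf_dsq_move_to_local(0 /* DSQ id */);
		else
			scx_bpf_consume(0 /* DSQ id */);
	}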
@@ -98,7 +102,7 @@ void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}
 	_Pragma("GCC diagnostic push")					\
 	_Pragma("GCC diagnostic ignored \"-Wint-conversion\"")		\
 	___bpf_fill(___param, args);					\
-	_Pragma("GCC diagnostic pop")					\
+	_Pragma("GCC diagnostic pop")
 
 /*
  * scx_bpf_exit() wraps the scx_bpf_exit_bstr() kfunc with variadic arguments
@@ -136,6 +140,20 @@ void ___scx_bpf_bstr_format_checker(const char *fmt, ...) {}
 	___scx_bpf_bstr_format_checker(fmt, ##args);			\
 })
 
+/*
+ * scx_bpf_dump_header() is a wrapper around scx_bpf_dump that adds a header
+ * of system information for debugging.
+ */
+#define scx_bpf_dump_header()						\
+({									\
+	scx_bpf_dump("kernel: %d.%d.%d %s\ncc: %s\n",			\
+		     LINUX_KERNEL_VERSION >> 16,			\
+		     LINUX_KERNEL_VERSION >> 8 & 0xFF,			\
+		     LINUX_KERNEL_VERSION & 0xFF,			\
+		     CONFIG_LOCALVERSION,				\
+		     CONFIG_CC_VERSION_TEXT);				\
+})
+
 #define BPF_STRUCT_OPS(name, args...)					\
 	SEC("struct_ops/"#name)						\
 	BPF_PROG(name, ##args)
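Note: a hypothetical ops.dump() callback using the new macro (sketch only;
example_dump and nr_queued are illustrative names, not part of this patch):

	void BPF_STRUCT_OPS(example_dump, struct scx_dump_ctx *dctx)
	{
		scx_bpf_dump_header();
		scx_bpf_dump("nr_queued=%llu\n", nr_queued);
	}

The header prints the running kernel's version triplet, CONFIG_LOCALVERSION,
and the compiler string, which makes debug dumps self-describing.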
@@ -317,6 +335,66 @@ u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
 				   const struct cpumask *src2) __ksym;
 u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
 
+int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) __ksym;
+int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym;
+void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym;
+
+#define def_iter_struct(name)						\
+struct bpf_iter_##name {						\
+	struct bpf_iter_bits it;					\
+	const struct cpumask *bitmap;					\
+};
+
+#define def_iter_new(name)						\
+static inline int bpf_iter_##name##_new(				\
+	struct bpf_iter_##name *it, const u64 *unsafe_ptr__ign, u32 nr_words)	\
+{									\
+	it->bitmap = scx_bpf_get_##name##_cpumask();			\
+	return bpf_iter_bits_new(&it->it, (const u64 *)it->bitmap,	\
+				 sizeof(struct cpumask) / 8);		\
+}
+
+#define def_iter_next(name)						\
+static inline int *bpf_iter_##name##_next(struct bpf_iter_##name *it) {	\
+	return bpf_iter_bits_next(&it->it);				\
+}
+
+#define def_iter_destroy(name)						\
+static inline void bpf_iter_##name##_destroy(struct bpf_iter_##name *it) {	\
+	scx_bpf_put_cpumask(it->bitmap);				\
+	bpf_iter_bits_destroy(&it->it);					\
+}
+#define def_for_each_cpu(cpu, name) for_each_##name##_cpu(cpu)
+
+/// Provides iterator for possible and online cpus.
+///
+/// # Example
+///
+/// ```
+/// static inline void example_use() {
+///     int *cpu;
+///
+///     for_each_possible_cpu(cpu) {
+///         bpf_printk("CPU %d is possible", *cpu);
+///     }
+///
+///     for_each_online_cpu(cpu) {
+///         bpf_printk("CPU %d is online", *cpu);
+///     }
+/// }
+/// ```
+def_iter_struct(possible);
+def_iter_new(possible);
+def_iter_next(possible);
+def_iter_destroy(possible);
+#define for_each_possible_cpu(cpu) bpf_for_each(possible, cpu, NULL, 0)
+
+def_iter_struct(online);
+def_iter_new(online);
+def_iter_next(online);
+def_iter_destroy(online);
+#define for_each_online_cpu(cpu) bpf_for_each(online, cpu, NULL, 0)
+
 /*
  * Access a cpumask in read-only mode (typically to check bits).
  */
@@ -423,5 +501,6 @@ static inline u32 log2_u64(u64 v)
 }
 
 #include "compat.bpf.h"
+#include "enums.bpf.h"
 
 #endif /* __SCX_COMMON_BPF_H */
tools/sched_ext/include/scx/common.h:

@@ -71,5 +71,11 @@ typedef int64_t s64;
 
 #include "user_exit_info.h"
 #include "compat.h"
+#include "enums.h"
+
+/* not available when building kernel tools/sched_ext */
+#if __has_include(<lib/sdt_task.h>)
+#include <lib/sdt_task.h>
+#endif
 
 #endif /* __SCHED_EXT_COMMON_H */
tools/sched_ext/include/scx/compat.h:

@@ -149,6 +149,7 @@ static inline long scx_hotplug_seq(void)
 	__skel = __scx_name##__open();					\
 	SCX_BUG_ON(!__skel, "Could not open " #__scx_name);		\
 	__skel->struct_ops.__ops_name->hotplug_seq = scx_hotplug_seq();	\
+	SCX_ENUM_INIT(__skel);						\
 	__skel;								\
 })
tools/sched_ext/include/scx/enums.autogen.bpf.h (new file, 105 lines):

@@ -0,0 +1,105 @@
/*
 * WARNING: This file is autogenerated from scripts/gen_enums.py. If you would
 * like to access an enum that is currently missing, add it to the script
 * and run it from the root directory to update this file.
 */

const volatile u64 __SCX_OPS_NAME_LEN __weak;
#define SCX_OPS_NAME_LEN __SCX_OPS_NAME_LEN

const volatile u64 __SCX_SLICE_DFL __weak;
#define SCX_SLICE_DFL __SCX_SLICE_DFL

const volatile u64 __SCX_SLICE_INF __weak;
#define SCX_SLICE_INF __SCX_SLICE_INF

const volatile u64 __SCX_DSQ_FLAG_BUILTIN __weak;
#define SCX_DSQ_FLAG_BUILTIN __SCX_DSQ_FLAG_BUILTIN

const volatile u64 __SCX_DSQ_FLAG_LOCAL_ON __weak;
#define SCX_DSQ_FLAG_LOCAL_ON __SCX_DSQ_FLAG_LOCAL_ON

const volatile u64 __SCX_DSQ_INVALID __weak;
#define SCX_DSQ_INVALID __SCX_DSQ_INVALID

const volatile u64 __SCX_DSQ_GLOBAL __weak;
#define SCX_DSQ_GLOBAL __SCX_DSQ_GLOBAL

const volatile u64 __SCX_DSQ_LOCAL __weak;
#define SCX_DSQ_LOCAL __SCX_DSQ_LOCAL

const volatile u64 __SCX_DSQ_LOCAL_ON __weak;
#define SCX_DSQ_LOCAL_ON __SCX_DSQ_LOCAL_ON

const volatile u64 __SCX_DSQ_LOCAL_CPU_MASK __weak;
#define SCX_DSQ_LOCAL_CPU_MASK __SCX_DSQ_LOCAL_CPU_MASK

const volatile u64 __SCX_TASK_QUEUED __weak;
#define SCX_TASK_QUEUED __SCX_TASK_QUEUED

const volatile u64 __SCX_TASK_RESET_RUNNABLE_AT __weak;
#define SCX_TASK_RESET_RUNNABLE_AT __SCX_TASK_RESET_RUNNABLE_AT

const volatile u64 __SCX_TASK_DEQD_FOR_SLEEP __weak;
#define SCX_TASK_DEQD_FOR_SLEEP __SCX_TASK_DEQD_FOR_SLEEP

const volatile u64 __SCX_TASK_STATE_SHIFT __weak;
#define SCX_TASK_STATE_SHIFT __SCX_TASK_STATE_SHIFT

const volatile u64 __SCX_TASK_STATE_BITS __weak;
#define SCX_TASK_STATE_BITS __SCX_TASK_STATE_BITS

const volatile u64 __SCX_TASK_STATE_MASK __weak;
#define SCX_TASK_STATE_MASK __SCX_TASK_STATE_MASK

const volatile u64 __SCX_TASK_CURSOR __weak;
#define SCX_TASK_CURSOR __SCX_TASK_CURSOR

const volatile u64 __SCX_TASK_NONE __weak;
#define SCX_TASK_NONE __SCX_TASK_NONE

const volatile u64 __SCX_TASK_INIT __weak;
#define SCX_TASK_INIT __SCX_TASK_INIT

const volatile u64 __SCX_TASK_READY __weak;
#define SCX_TASK_READY __SCX_TASK_READY

const volatile u64 __SCX_TASK_ENABLED __weak;
#define SCX_TASK_ENABLED __SCX_TASK_ENABLED

const volatile u64 __SCX_TASK_NR_STATES __weak;
#define SCX_TASK_NR_STATES __SCX_TASK_NR_STATES

const volatile u64 __SCX_TASK_DSQ_ON_PRIQ __weak;
#define SCX_TASK_DSQ_ON_PRIQ __SCX_TASK_DSQ_ON_PRIQ

const volatile u64 __SCX_KICK_IDLE __weak;
#define SCX_KICK_IDLE __SCX_KICK_IDLE

const volatile u64 __SCX_KICK_PREEMPT __weak;
#define SCX_KICK_PREEMPT __SCX_KICK_PREEMPT

const volatile u64 __SCX_KICK_WAIT __weak;
#define SCX_KICK_WAIT __SCX_KICK_WAIT

const volatile u64 __SCX_ENQ_WAKEUP __weak;
#define SCX_ENQ_WAKEUP __SCX_ENQ_WAKEUP

const volatile u64 __SCX_ENQ_HEAD __weak;
#define SCX_ENQ_HEAD __SCX_ENQ_HEAD

const volatile u64 __SCX_ENQ_PREEMPT __weak;
#define SCX_ENQ_PREEMPT __SCX_ENQ_PREEMPT

const volatile u64 __SCX_ENQ_REENQ __weak;
#define SCX_ENQ_REENQ __SCX_ENQ_REENQ

const volatile u64 __SCX_ENQ_LAST __weak;
#define SCX_ENQ_LAST __SCX_ENQ_LAST

const volatile u64 __SCX_ENQ_CLEAR_OPSS __weak;
#define SCX_ENQ_CLEAR_OPSS __SCX_ENQ_CLEAR_OPSS

const volatile u64 __SCX_ENQ_DSQ_PRIQ __weak;
#define SCX_ENQ_DSQ_PRIQ __SCX_ENQ_DSQ_PRIQ
tools/sched_ext/include/scx/enums.autogen.h (new file, 41 lines):

@@ -0,0 +1,41 @@
/*
 * WARNING: This file is autogenerated from scripts/gen_enums.py. If you would
 * like to access an enum that is currently missing, add it to the script
 * and run it from the root directory to update this file.
 */

#define SCX_ENUM_INIT(skel) do {					\
	SCX_ENUM_SET(skel, scx_public_consts, SCX_OPS_NAME_LEN);	\
	SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_DFL);		\
	SCX_ENUM_SET(skel, scx_public_consts, SCX_SLICE_INF);		\
	SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_BUILTIN);	\
	SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_FLAG_LOCAL_ON);	\
	SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_INVALID);		\
	SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_GLOBAL);		\
	SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_LOCAL);		\
	SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_LOCAL_ON);	\
	SCX_ENUM_SET(skel, scx_dsq_id_flags, SCX_DSQ_LOCAL_CPU_MASK);	\
	SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_QUEUED);		\
	SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_RESET_RUNNABLE_AT);	\
	SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_DEQD_FOR_SLEEP);	\
	SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_STATE_SHIFT);	\
	SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_STATE_BITS);	\
	SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_STATE_MASK);	\
	SCX_ENUM_SET(skel, scx_ent_flags, SCX_TASK_CURSOR);		\
	SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_NONE);		\
	SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_INIT);		\
	SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_READY);		\
	SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_ENABLED);		\
	SCX_ENUM_SET(skel, scx_task_state, SCX_TASK_NR_STATES);	\
	SCX_ENUM_SET(skel, scx_ent_dsq_flags, SCX_TASK_DSQ_ON_PRIQ);	\
	SCX_ENUM_SET(skel, scx_kick_flags, SCX_KICK_IDLE);		\
	SCX_ENUM_SET(skel, scx_kick_flags, SCX_KICK_PREEMPT);		\
	SCX_ENUM_SET(skel, scx_kick_flags, SCX_KICK_WAIT);		\
	SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_WAKEUP);		\
	SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_HEAD);		\
	SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_PREEMPT);		\
	SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_REENQ);		\
	SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_LAST);		\
	SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_CLEAR_OPSS);		\
	SCX_ENUM_SET(skel, scx_enq_flags, SCX_ENQ_DSQ_PRIQ);		\
} while (0)
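Note: SCX_ENUM_INIT() is invoked from SCX_OPS_OPEN() (see the compat.h hunk
above), so every scheduler picks up the kernel's actual enum values at
skeleton-open time instead of baking in compile-time copies that can go
stale. Per entry it does roughly the equivalent of (sketch; the skeleton
name is illustrative):

	u64 val;

	/* read enum scx_public_consts::SCX_SLICE_DFL from the kernel's BTF */
	__COMPAT_read_enum("scx_public_consts", "SCX_SLICE_DFL", &val);
	skel->rodata->__SCX_SLICE_DFL = val;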
tools/sched_ext/include/scx/enums.bpf.h (new file, 12 lines):

@@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Convenience macros for getting/setting struct scx_enums instances.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 */
#ifndef __SCX_ENUMS_BPF_H
#define __SCX_ENUMS_BPF_H

#include "enums.autogen.bpf.h"

#endif /* __SCX_ENUMS_BPF_H */
tools/sched_ext/include/scx/enums.h (new file, 27 lines):

@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Define struct scx_enums that stores the load-time values of enums
 * used by the BPF program.
 *
 * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
 */

#ifndef __SCX_ENUMS_H
#define __SCX_ENUMS_H

static inline void __ENUM_set(u64 *val, char *type, char *name)
{
	bool res;

	res = __COMPAT_read_enum(type, name, val);
	SCX_BUG_ON(!res, "enum not found(%s)", name);
}

#define SCX_ENUM_SET(skel, type, name) do {			\
	__ENUM_set(&skel->rodata->__##name, #type, #name);	\
} while (0)


#include "enums.autogen.h"

#endif /* __SCX_ENUMS_H */
tools/sched_ext/include/scx/user_exit_info.h:

@@ -10,6 +10,11 @@
 #ifndef __USER_EXIT_INFO_H
 #define __USER_EXIT_INFO_H
 
+#ifdef LSP
+#define __bpf__
+#include "../vmlinux.h"
+#endif
+
 enum uei_sizes {
 	UEI_REASON_LEN		= 128,
 	UEI_MSG_LEN		= 1024,
@@ -25,9 +30,7 @@ struct user_exit_info {
 
 #ifdef __bpf__
 
-#ifdef LSP
-#include "../vmlinux/vmlinux.h"
-#else
+#ifndef LSP
 #include "vmlinux.h"
 #endif
 #include <bpf/bpf_core_read.h>
tools/sched_ext/scx_central.bpf.c:

@@ -57,7 +57,7 @@ enum {
 
 const volatile s32 central_cpu;
 const volatile u32 nr_cpu_ids = 1;	/* !0 for veristat, set during init */
-const volatile u64 slice_ns = SCX_SLICE_DFL;
+const volatile u64 slice_ns;
 
 bool timer_pinned = true;
 u64 nr_total, nr_locals, nr_queued, nr_lost_pids;
tools/sched_ext/scx_central.c:

@@ -58,6 +58,7 @@ int main(int argc, char **argv)
 
 	skel->rodata->central_cpu = 0;
 	skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus();
+	skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL");
 
 	while ((opt = getopt(argc, argv, "s:c:pvh")) != -1) {
 		switch (opt) {
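Note: the BPF-side default (`const volatile u64 slice_ns;`, i.e. zero) is now
filled in from userspace before load. __COMPAT_ENUM_OR_ZERO is assumed here
to behave like the following sketch (built on the __COMPAT_read_enum() used
by enums.h above): return the named enum's value from the running kernel's
BTF, or 0 when the kernel does not define it.

	static inline u64 enum_or_zero(const char *type, const char *name)
	{
		u64 val = 0;

		/* leaves val at 0 if the enum is absent from kernel BTF */
		__COMPAT_read_enum(type, name, &val);
		return val;
	}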
@@ -97,7 +98,7 @@ int main(int argc, char **argv)
 	SCX_BUG_ON(!cpuset, "Failed to allocate cpuset");
 	CPU_ZERO(cpuset);
 	CPU_SET(skel->rodata->central_cpu, cpuset);
-	SCX_BUG_ON(sched_setaffinity(0, sizeof(cpuset), cpuset),
+	SCX_BUG_ON(sched_setaffinity(0, sizeof(*cpuset), cpuset),
 		   "Failed to affinitize to central CPU %d (max %d)",
 		   skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1);
 	CPU_FREE(cpuset);
tools/sched_ext/scx_flatcg.bpf.c:

@@ -57,7 +57,7 @@ enum {
 char _license[] SEC("license") = "GPL";
 
 const volatile u32 nr_cpus = 32;	/* !0 for veristat, set during init */
-const volatile u64 cgrp_slice_ns = SCX_SLICE_DFL;
+const volatile u64 cgrp_slice_ns;
 const volatile bool fifo_sched;
 
 u64 cvtime_now;
tools/sched_ext/scx_flatcg.c:

@@ -137,6 +137,7 @@ int main(int argc, char **argv)
 	skel = SCX_OPS_OPEN(flatcg_ops, scx_flatcg);
 
 	skel->rodata->nr_cpus = libbpf_num_possible_cpus();
+	skel->rodata->cgrp_slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL");
 
 	while ((opt = getopt(argc, argv, "s:i:dfvh")) != -1) {
 		double v;
tools/sched_ext/scx_qmap.bpf.c:

@@ -33,7 +33,7 @@ enum consts {
 
 char _license[] SEC("license") = "GPL";
 
-const volatile u64 slice_ns = SCX_SLICE_DFL;
+const volatile u64 slice_ns;
 const volatile u32 stall_user_nth;
 const volatile u32 stall_kernel_nth;
 const volatile u32 dsp_inf_loop_after;
tools/sched_ext/scx_qmap.c:

@@ -64,6 +64,8 @@ int main(int argc, char **argv)
 
 	skel = SCX_OPS_OPEN(qmap_ops, scx_qmap);
 
+	skel->rodata->slice_ns = __COMPAT_ENUM_OR_ZERO("scx_public_consts", "SCX_SLICE_DFL");
+
 	while ((opt = getopt(argc, argv, "s:e:t:T:l:b:PHd:D:Spvh")) != -1) {
 		switch (opt) {
 		case 's':
tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c:

@@ -20,7 +20,7 @@ s32 BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_select_cpu, struct task_struct *p,
 		 * If we dispatch to a bogus DSQ that will fall back to the
 		 * builtin global DSQ, we fail gracefully.
 		 */
-		scx_bpf_dispatch_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
+		scx_bpf_dsq_insert_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
				       p->scx.dsq_vtime, 0);
 		return cpu;
 	}
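Note: the selftest changes from here on track the kfunc renames in sched_ext,
with arguments unchanged:

	scx_bpf_dispatch()        -> scx_bpf_dsq_insert()
	scx_bpf_dispatch_vtime()  -> scx_bpf_dsq_insert_vtime()
	scx_bpf_consume()         -> scx_bpf_dsq_move_to_local()

e.g. scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags) becomes
scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags).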
tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c:

@@ -17,8 +17,8 @@ s32 BPF_STRUCT_OPS(ddsp_vtimelocal_fail_select_cpu, struct task_struct *p,
 
 	if (cpu >= 0) {
 		/* Shouldn't be allowed to vtime dispatch to a builtin DSQ. */
-		scx_bpf_dispatch_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
-				       p->scx.dsq_vtime, 0);
+		scx_bpf_dsq_insert_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
+					 p->scx.dsq_vtime, 0);
 		return cpu;
 	}
tools/testing/selftests/sched_ext/dsp_local_on.bpf.c:

@@ -45,7 +45,7 @@ void BPF_STRUCT_OPS(dsp_local_on_dispatch, s32 cpu, struct task_struct *prev)
 
 	target = bpf_get_prandom_u32() % nr_cpus;
 
-	scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
+	scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
 	bpf_task_release(p);
 }
tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c:

@@ -31,7 +31,7 @@ void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
 	/* Can only call from ops.select_cpu() */
 	scx_bpf_select_cpu_dfl(p, 0, 0, &found);
 
-	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 }
 
 SEC(".struct_ops.link")
tools/testing/selftests/sched_ext/exit.bpf.c:

@@ -33,7 +33,7 @@ void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags)
 	if (exit_point == EXIT_ENQUEUE)
 		EXIT_CLEANLY();
 
-	scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
+	scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
 }
 
 void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
@@ -41,7 +41,7 @@ void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
 	if (exit_point == EXIT_DISPATCH)
 		EXIT_CLEANLY();
 
-	scx_bpf_consume(DSQ_ID);
+	scx_bpf_dsq_move_to_local(DSQ_ID);
 }
 
 void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)
tools/testing/selftests/sched_ext/maximal.bpf.c:

@@ -12,6 +12,8 @@
 
 char _license[] SEC("license") = "GPL";
 
+#define DSQ_ID 0
+
 s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu,
 		   u64 wake_flags)
 {
@@ -20,7 +22,7 @@ s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu,
 
 void BPF_STRUCT_OPS(maximal_enqueue, struct task_struct *p, u64 enq_flags)
 {
-	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+	scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
 }
 
 void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
@@ -28,7 +30,7 @@ void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
 
 void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev)
 {
-	scx_bpf_consume(SCX_DSQ_GLOBAL);
+	scx_bpf_dsq_move_to_local(DSQ_ID);
 }
 
 void BPF_STRUCT_OPS(maximal_runnable, struct task_struct *p, u64 enq_flags)
@@ -123,7 +125,7 @@ void BPF_STRUCT_OPS(maximal_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
 
 s32 BPF_STRUCT_OPS_SLEEPABLE(maximal_init)
 {
-	return 0;
+	return scx_bpf_create_dsq(DSQ_ID, -1);
 }
 
 void BPF_STRUCT_OPS(maximal_exit, struct scx_exit_info *info)
tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c:

@@ -30,7 +30,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_enqueue, struct task_struct *p,
 	}
 	scx_bpf_put_idle_cpumask(idle_mask);
 
-	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 }
 
 SEC(".struct_ops.link")
tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c:

@@ -67,7 +67,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_enqueue, struct task_struct *p,
 		saw_local = true;
 	}
 
-	scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, enq_flags);
+	scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, enq_flags);
 }
 
 s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_init_task,
tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c:

@@ -29,7 +29,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_select_cpu, struct task_struct *p,
 	cpu = prev_cpu;
 
 dispatch:
-	scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, 0);
+	scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, 0);
 	return cpu;
 }
tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c:

@@ -18,7 +18,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_bad_dsq_select_cpu, struct task_struct *p
 			   s32 prev_cpu, u64 wake_flags)
 {
 	/* Dispatching to a random DSQ should fail. */
-	scx_bpf_dispatch(p, 0xcafef00d, SCX_SLICE_DFL, 0);
+	scx_bpf_dsq_insert(p, 0xcafef00d, SCX_SLICE_DFL, 0);
 
 	return prev_cpu;
 }
tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c:

@@ -18,8 +18,8 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_dbl_dsp_select_cpu, struct task_struct *p
 			   s32 prev_cpu, u64 wake_flags)
 {
 	/* Dispatching twice in a row is disallowed. */
-	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
-	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
 
 	return prev_cpu;
 }
tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c:

@@ -2,8 +2,8 @@
 /*
  * A scheduler that validates that enqueue flags are properly stored and
  * applied at dispatch time when a task is directly dispatched from
- * ops.select_cpu(). We validate this by using scx_bpf_dispatch_vtime(), and
- * making the test a very basic vtime scheduler.
+ * ops.select_cpu(). We validate this by using scx_bpf_dsq_insert_vtime(),
+ * and making the test a very basic vtime scheduler.
  *
  * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
  * Copyright (c) 2024 David Vernet <dvernet@meta.com>
@@ -47,13 +47,13 @@ s32 BPF_STRUCT_OPS(select_cpu_vtime_select_cpu, struct task_struct *p,
 	cpu = prev_cpu;
 	scx_bpf_test_and_clear_cpu_idle(cpu);
 ddsp:
-	scx_bpf_dispatch_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
+	scx_bpf_dsq_insert_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
 	return cpu;
 }
 
 void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p)
 {
-	if (scx_bpf_consume(VTIME_DSQ))
+	if (scx_bpf_dsq_move_to_local(VTIME_DSQ))
 		consumed = true;
 }