commit 06e51be3d5
Currently, a dsq is always a FIFO. A task which is dispatched earlier gets consumed or executed earlier. While this is sufficient when dsq's are used as simple staging areas for tasks which are ready to execute, it'd make dsq's a lot more useful if they could implement custom ordering.

This patch adds a vtime-ordered priority queue to dsq's. When the BPF scheduler dispatches a task with the new scx_bpf_dispatch_vtime() helper, it can specify the vtime that the task should be inserted at, and the task is inserted into the priority queue in the dsq, which is ordered according to time_before64() comparison of the vtime values.

A DSQ can either be a FIFO or a priority queue and automatically switches between the two depending on whether scx_bpf_dispatch() or scx_bpf_dispatch_vtime() is used. Using the wrong variant while the DSQ already has the other type queued is not allowed and triggers an ops error. Built-in DSQs must always be FIFOs.

This makes it easy and efficient for BPF schedulers to implement proper vtime-based scheduling within each dsq, at a negligible cost in terms of code complexity and overhead.

scx_simple and scx_example_flatcg are updated to default to weighted vtime scheduling (the latter within each cgroup). FIFO scheduling can be selected with the -f option.

v4: - As allowing mixing of priority queue and FIFO on the same DSQ sometimes led to unexpected starvations, DSQs now error out if both modes are used at the same time, and the built-in DSQs are no longer allowed to be priority queues.

    - Explicit type struct scx_dsq_node added to contain the fields needed to be linked on DSQs. This will be used to implement a stateful iterator.

    - Tasks are now always linked on dsq->list whether the DSQ is in FIFO or PRIQ mode. This confines PRIQ-related complexities to the enqueue and dequeue paths. Other paths only need to look at dsq->list. This will also ease implementing a BPF iterator.

    - Print p->scx.dsq_flags in debug dump.

v3: - The SCX_TASK_DSQ_ON_PRIQ flag is moved from p->scx.flags into its own p->scx.dsq_flags. The flag is protected by the dsq lock unlike the other flags in p->scx.flags, which led to flag corruption in some cases.

    - Add comments explaining the interaction between using consumption of p->scx.slice to determine vtime progress and yielding.

v2: - p->scx.dsq_vtime was not initialized on load or across cgroup migrations, leading to some tasks being stalled for extended periods of time depending on how saturated the machine is. Fixed.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: David Vernet <dvernet@meta.com>
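To make the new interface concrete, the sketch below shows roughly how a scheduler's enqueue/running/stopping callbacks might use scx_bpf_dispatch_vtime(), loosely in the spirit of the updated scx_simple. It is an illustrative fragment, not code from this commit: SHARED_DSQ, fifo_sched, vtime_now, vtime_before() and the sketch_* callback names are made up for the example, the header name follows the example schedulers' layout of the time, and the struct_ops registration, ops.init() DSQ creation and dsq_vtime initialization (the v2 fix above) are omitted. Only scx_bpf_dispatch(), scx_bpf_dispatch_vtime(), SCX_SLICE_DFL and the p->scx fields come from sched_ext itself.

#include "scx_common.bpf.h"	/* common helpers/macros used by the example schedulers */

#define SHARED_DSQ 0			/* hypothetical custom DSQ, created in ops.init() */

const volatile bool fifo_sched;		/* wired to the -f option by the loader */
static u64 vtime_now;			/* global weighted-vtime clock */

/* Same ordering rule the kernel applies with time_before64(). */
static inline bool vtime_before(u64 a, u64 b)
{
	return (s64)(a - b) < 0;
}

void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
{
	if (fifo_sched) {
		/* Plain dispatch keeps SHARED_DSQ in FIFO mode. */
		scx_bpf_dispatch(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
	} else {
		u64 vtime = p->scx.dsq_vtime;

		/* Cap the credit an idle task can bank to one default slice. */
		if (vtime_before(vtime, vtime_now - SCX_SLICE_DFL))
			vtime = vtime_now - SCX_SLICE_DFL;

		/*
		 * Vtime dispatch switches SHARED_DSQ into priority-queue
		 * mode; tasks are consumed in ascending vtime order.
		 */
		scx_bpf_dispatch_vtime(p, SHARED_DSQ, SCX_SLICE_DFL, vtime,
				       enq_flags);
	}
}

void BPF_STRUCT_OPS(sketch_running, struct task_struct *p)
{
	if (fifo_sched)
		return;

	/* Keep the global clock in step with the task about to run. */
	if (vtime_before(vtime_now, p->scx.dsq_vtime))
		vtime_now = p->scx.dsq_vtime;
}

void BPF_STRUCT_OPS(sketch_stopping, struct task_struct *p, bool runnable)
{
	if (fifo_sched)
		return;

	/*
	 * Per the v3 note, vtime progress is driven by how much of the
	 * slice was consumed, scaled inversely by weight (100 = default).
	 */
	p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
}

As described in the message above, mixing modes is rejected at runtime: once FIFO-dispatched tasks sit on SHARED_DSQ, a scx_bpf_dispatch_vtime() into it (or vice versa) triggers an ops error, and vtime dispatch into the built-in DSQs is never allowed.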
init/init_task.c · 232 lines · 6.5 KiB · C
// SPDX-License-Identifier: GPL-2.0
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/mqueue.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/ext.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/audit.h>
#include <linux/numa.h>
#include <linux/scs.h>
#include <linux/plist.h>

#include <linux/uaccess.h>

static struct signal_struct init_signals = {
	.nr_threads = 1,
	.thread_head = LIST_HEAD_INIT(init_task.thread_node),
	.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(init_signals.wait_chldexit),
	.shared_pending = {
		.list = LIST_HEAD_INIT(init_signals.shared_pending.list),
		.signal = {{0}}
	},
	.multiprocess = HLIST_HEAD_INIT,
	.rlim = INIT_RLIMITS,
	.cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
	.exec_update_lock = __RWSEM_INITIALIZER(init_signals.exec_update_lock),
#ifdef CONFIG_POSIX_TIMERS
	.posix_timers = LIST_HEAD_INIT(init_signals.posix_timers),
	.cputimer = {
		.cputime_atomic = INIT_CPUTIME_ATOMIC,
	},
#endif
	INIT_CPU_TIMERS(init_signals)
	.pids = {
		[PIDTYPE_PID] = &init_struct_pid,
		[PIDTYPE_TGID] = &init_struct_pid,
		[PIDTYPE_PGID] = &init_struct_pid,
		[PIDTYPE_SID] = &init_struct_pid,
	},
	INIT_PREV_CPUTIME(init_signals)
};

static struct sighand_struct init_sighand = {
	.count = REFCOUNT_INIT(1),
	.action = { { { .sa_handler = SIG_DFL, } }, },
	.siglock = __SPIN_LOCK_UNLOCKED(init_sighand.siglock),
	.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh),
};

#ifdef CONFIG_SHADOW_CALL_STACK
unsigned long init_shadow_call_stack[SCS_SIZE / sizeof(long)] = {
	[(SCS_SIZE / sizeof(long)) - 1] = SCS_END_MAGIC
};
#endif

/*
 * Set up the first task table, touch at your own risk!. Base=0,
 * limit=0x1fffff (=2MB)
 */
struct task_struct init_task __aligned(L1_CACHE_BYTES) = {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	.thread_info = INIT_THREAD_INFO(init_task),
	.stack_refcount = REFCOUNT_INIT(1),
#endif
	.__state = 0,
	.stack = init_stack,
	.usage = REFCOUNT_INIT(2),
	.flags = PF_KTHREAD,
	.prio = MAX_PRIO - 20,
	.static_prio = MAX_PRIO - 20,
	.normal_prio = MAX_PRIO - 20,
	.policy = SCHED_NORMAL,
	.cpus_ptr = &init_task.cpus_mask,
	.user_cpus_ptr = NULL,
	.cpus_mask = CPU_MASK_ALL,
	.max_allowed_capacity = SCHED_CAPACITY_SCALE,
	.nr_cpus_allowed= NR_CPUS,
	.mm = NULL,
	.active_mm = &init_mm,
	.faults_disabled_mapping = NULL,
	.restart_block = {
		.fn = do_no_restart_syscall,
	},
	.se = {
		.group_node = LIST_HEAD_INIT(init_task.se.group_node),
	},
	.rt = {
		.run_list = LIST_HEAD_INIT(init_task.rt.run_list),
		.time_slice = RR_TIMESLICE,
	},
	.tasks = LIST_HEAD_INIT(init_task.tasks),
#ifdef CONFIG_SMP
	.pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
#endif
#ifdef CONFIG_CGROUP_SCHED
	.sched_task_group = &root_task_group,
#endif
#ifdef CONFIG_SCHED_CLASS_EXT
	.scx = {
		.dsq_node.list = LIST_HEAD_INIT(init_task.scx.dsq_node.list),
		.sticky_cpu = -1,
		.holding_cpu = -1,
		.runnable_node = LIST_HEAD_INIT(init_task.scx.runnable_node),
		.runnable_at = INITIAL_JIFFIES,
		.ddsp_dsq_id = SCX_DSQ_INVALID,
		.slice = SCX_SLICE_DFL,
	},
#endif
	.ptraced = LIST_HEAD_INIT(init_task.ptraced),
	.ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry),
	.real_parent = &init_task,
	.parent = &init_task,
	.children = LIST_HEAD_INIT(init_task.children),
	.sibling = LIST_HEAD_INIT(init_task.sibling),
	.group_leader = &init_task,
	RCU_POINTER_INITIALIZER(real_cred, &init_cred),
	RCU_POINTER_INITIALIZER(cred, &init_cred),
	.comm = INIT_TASK_COMM,
	.thread = INIT_THREAD,
	.fs = &init_fs,
	.files = &init_files,
#ifdef CONFIG_IO_URING
	.io_uring = NULL,
#endif
	.signal = &init_signals,
	.sighand = &init_sighand,
	.nsproxy = &init_nsproxy,
	.pending = {
		.list = LIST_HEAD_INIT(init_task.pending.list),
		.signal = {{0}}
	},
	.blocked = {{0}},
	.alloc_lock = __SPIN_LOCK_UNLOCKED(init_task.alloc_lock),
	.journal_info = NULL,
	INIT_CPU_TIMERS(init_task)
	.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock),
	.timer_slack_ns = 50000, /* 50 usec default slack */
	.thread_pid = &init_struct_pid,
	.thread_node = LIST_HEAD_INIT(init_signals.thread_head),
#ifdef CONFIG_AUDIT
	.loginuid = INVALID_UID,
	.sessionid = AUDIT_SID_UNSET,
#endif
#ifdef CONFIG_PERF_EVENTS
	.perf_event_mutex = __MUTEX_INITIALIZER(init_task.perf_event_mutex),
	.perf_event_list = LIST_HEAD_INIT(init_task.perf_event_list),
#endif
#ifdef CONFIG_PREEMPT_RCU
	.rcu_read_lock_nesting = 0,
	.rcu_read_unlock_special.s = 0,
	.rcu_node_entry = LIST_HEAD_INIT(init_task.rcu_node_entry),
	.rcu_blocked_node = NULL,
#endif
#ifdef CONFIG_TASKS_RCU
	.rcu_tasks_holdout = false,
	.rcu_tasks_holdout_list = LIST_HEAD_INIT(init_task.rcu_tasks_holdout_list),
	.rcu_tasks_idle_cpu = -1,
	.rcu_tasks_exit_list = LIST_HEAD_INIT(init_task.rcu_tasks_exit_list),
#endif
#ifdef CONFIG_TASKS_TRACE_RCU
	.trc_reader_nesting = 0,
	.trc_reader_special.s = 0,
	.trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
	.trc_blkd_node = LIST_HEAD_INIT(init_task.trc_blkd_node),
#endif
#ifdef CONFIG_CPUSETS
	.mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,
						 &init_task.alloc_lock),
#endif
#ifdef CONFIG_RT_MUTEXES
	.pi_waiters = RB_ROOT_CACHED,
	.pi_top_task = NULL,
#endif
	INIT_PREV_CPUTIME(init_task)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	.vtime.seqcount = SEQCNT_ZERO(init_task.vtime_seqcount),
	.vtime.starttime = 0,
	.vtime.state = VTIME_SYS,
#endif
#ifdef CONFIG_NUMA_BALANCING
	.numa_preferred_nid = NUMA_NO_NODE,
	.numa_group = NULL,
	.numa_faults = NULL,
#endif
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	.kasan_depth = 1,
#endif
#ifdef CONFIG_KCSAN
	.kcsan_ctx = {
		.scoped_accesses = {LIST_POISON1, NULL},
	},
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	.softirqs_enabled = 1,
#endif
#ifdef CONFIG_LOCKDEP
	.lockdep_depth = 0, /* no locks held yet */
	.curr_chain_key = INITIAL_CHAIN_KEY,
	.lockdep_recursion = 0,
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.ret_stack = NULL,
	.tracing_graph_pause = ATOMIC_INIT(0),
#endif
#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
	.trace_recursion = 0,
#endif
#ifdef CONFIG_LIVEPATCH
	.patch_state = KLP_TRANSITION_IDLE,
#endif
#ifdef CONFIG_SECURITY
	.security = NULL,
#endif
#ifdef CONFIG_SECCOMP_FILTER
	.seccomp = { .filter_count = ATOMIC_INIT(0) },
#endif
};
EXPORT_SYMBOL(init_task);

/*
 * Initial thread structure. Alignment of this is handled by a special
 * linker map entry.
 */
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info init_thread_info __init_thread_info = INIT_THREAD_INFO(init_task);
#endif