Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
sched/balancing: Rename scheduler_tick() => sched_tick()
- Standardize on prefixing scheduler-internal functions defined in
  <linux/sched.h> with the sched_*() prefix. scheduler_tick() was the
  only function using the scheduler_ prefix; harmonize it.

- The other reason to rename it is that the NOHZ scheduler tick
  handling functions are already named sched_tick_*(). This makes
  'git grep sched_tick' more meaningful.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Link: https://lore.kernel.org/r/20240308111819.1101550-3-mingo@kernel.org
parent 70a27d6d1b
commit 86dd6c04ef
@@ -32,13 +32,13 @@ load of each of its member CPUs, and only when the load of a group becomes
 out of balance are tasks moved between groups.
 
 In kernel/sched/core.c, trigger_load_balance() is run periodically on each CPU
-through scheduler_tick(). It raises a softirq after the next regularly scheduled
+through sched_tick(). It raises a softirq after the next regularly scheduled
 rebalancing event for the current runqueue has arrived. The actual load
 balancing workhorse, sched_balance_softirq()->rebalance_domains(), is then run
 in softirq context (SCHED_SOFTIRQ).
 
 The latter function takes two arguments: the runqueue of current CPU and whether
-the CPU was idle at the time the scheduler_tick() happened and iterates over all
+the CPU was idle at the time the sched_tick() happened and iterates over all
 sched domains our CPU is on, starting from its base domain and going up the ->parent
 chain. While doing that, it checks to see if the current domain has exhausted its
 rebalance interval. If so, it runs load_balance() on that domain. It then checks
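The domain walk this hunk documents — start at the CPU's base domain, follow the ->parent chain upward, and run load_balance() on any domain whose rebalance interval has expired — can be modeled in a few lines of plain C. The sketch below is an illustrative userspace model, not the kernel code: struct sched_domain here is a stand-in carrying only the fields the walk needs, load_balance() is a stub, and rebalance_domains_model() is a hypothetical name.

#include <stdio.h>

/* Stand-in for the kernel's sched_domain; only what the walk needs. */
struct sched_domain {
	struct sched_domain *parent;    /* next-higher domain, NULL at the top */
	unsigned long last_balance;     /* time of the last rebalance */
	unsigned long balance_interval; /* minimum time between rebalances */
	const char *name;
};

/* Stub: the real load_balance() migrates tasks between groups. */
static void load_balance(struct sched_domain *sd)
{
	printf("load_balance() on %s\n", sd->name);
}

/* Walk from the base domain up the ->parent chain, rebalancing any
 * domain whose interval has been exhausted. */
static void rebalance_domains_model(struct sched_domain *base, unsigned long now)
{
	for (struct sched_domain *sd = base; sd; sd = sd->parent) {
		if (now - sd->last_balance >= sd->balance_interval) {
			load_balance(sd);
			sd->last_balance = now;
		}
	}
}

int main(void)
{
	struct sched_domain top  = { NULL, 0, 64, "NUMA" };
	struct sched_domain base = { &top, 0, 8,  "MC"   };

	rebalance_domains_model(&base, 10); /* only the MC interval is due */
	rebalance_domains_model(&base, 70); /* now both MC and NUMA are due */
	return 0;
}

Higher domains typically get longer intervals, so the walk usually stops doing real work after the first level or two; the kernel additionally scales the interval with CPU busyness, which this sketch omits.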
@@ -34,12 +34,12 @@ shared among CPUs. The intersection of the CPU masks of any two groups is not necessarily empty; if it is
 Load balancing in a sched domain happens between sched groups. That is, each group is treated as one entity. The load of a group is defined as
 the sum of the loads of its member CPUs. Only when a group's load becomes out of balance are tasks moved between groups.
 
-In kernel/sched/core.c, trigger_load_balance() runs periodically on each CPU via scheduler_tick().
+In kernel/sched/core.c, trigger_load_balance() runs periodically on each CPU via sched_tick().
 It raises a softirq after the current runqueue's next regularly scheduled rebalancing event has arrived. The real load-balancing
 work is done by sched_balance_softirq()->rebalance_domains(), which runs in softirq context
 (SCHED_SOFTIRQ).
 
-The latter function takes two arguments: the current CPU's runqueue and whether it was idle when scheduler_tick() was called. It starts
+The latter function takes two arguments: the current CPU's runqueue and whether it was idle when sched_tick() was called. It starts
 from the base sched domain the current CPU is in and walks up the parent pointer chain into higher-level sched domains. While
 iterating, it checks whether the current domain has exhausted its rebalance interval; if so, it runs load_balance() on
 that domain. It then checks the parent sched domain (if it exists), then the parent's parent, and so on.
@@ -301,7 +301,7 @@ enum {
 	TASK_COMM_LEN = 16,
 };
 
-extern void scheduler_tick(void);
+extern void sched_tick(void);
 
 #define MAX_SCHEDULE_TIMEOUT	LONG_MAX
 
@@ -5662,7 +5662,7 @@ static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
  * This function gets called by the timer code, with HZ frequency.
  * We call it with interrupts disabled.
  */
-void scheduler_tick(void)
+void sched_tick(void)
 {
 	int cpu = smp_processor_id();
 	struct rq *rq = cpu_rq(cpu);
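For orientation, here is a heavily simplified userspace model of what a per-CPU tick handler of this shape does each HZ interval: look up this CPU's runqueue, advance its clock, account the tick to the running task's scheduling class, and kick the periodic balancing machinery. Every type and helper below is a stand-in (sched_tick_model() is a hypothetical name), not the kernel API.

#include <stdio.h>

struct rq {
	int cpu;
	unsigned long clock; /* per-runqueue clock, advanced every tick */
};

static struct rq runqueues[2];

/* Stand-in: the kernel raises SCHED_SOFTIRQ here when a rebalance is due. */
static void trigger_load_balance(struct rq *rq)
{
	printf("cpu%d: check/raise SCHED_SOFTIRQ\n", rq->cpu);
}

/* Model of the renamed sched_tick(): one call per timer tick per CPU. */
static void sched_tick_model(int cpu)
{
	struct rq *rq = &runqueues[cpu];

	rq->clock++;              /* stands in for update_rq_clock() */
	/* ...the current task's sched class would get its task_tick() here... */
	trigger_load_balance(rq); /* entry point of periodic load balancing */
}

int main(void)
{
	for (int cpu = 0; cpu < 2; cpu++) {
		runqueues[cpu].cpu = cpu;
		sched_tick_model(cpu);
	}
	return 0;
}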
@@ -6585,7 +6585,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
  * paths. For example, see arch/x86/entry_64.S.
  *
  * To drive preemption between tasks, the scheduler sets the flag in timer
- * interrupt handler scheduler_tick().
+ * interrupt handler sched_tick().
  *
  * 3. Wakeups don't really cause entry into schedule(). They add a
  *    task to the run-queue and that's it.
@@ -379,7 +379,7 @@ void calc_global_load(void)
 }
 
 /*
- * Called from scheduler_tick() to periodically update this CPU's
+ * Called from sched_tick() to periodically update this CPU's
  * active count.
  */
 void calc_global_load_tick(struct rq *this_rq)
@@ -2478,7 +2478,7 @@ void update_process_times(int user_tick)
 	if (in_irq())
 		irq_work_tick();
 #endif
-	scheduler_tick();
+	sched_tick();
 	if (IS_ENABLED(CONFIG_POSIX_TIMERS))
 		run_posix_cpu_timers();
 }
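This hunk is the call site of the rename: the timer code drives the scheduler once per tick, between irq_work processing and POSIX CPU timer handling. A minimal sketch of that fixed per-tick ordering, with stubs standing in for the kernel functions named above (update_process_times_model() is a hypothetical name):

#include <stdio.h>

/* Stubs for the kernel functions called from update_process_times(). */
static void irq_work_tick(void)        { printf("irq_work_tick()\n"); }
static void sched_tick(void)           { printf("sched_tick()\n"); }
static void run_posix_cpu_timers(void) { printf("run_posix_cpu_timers()\n"); }

/* Model of update_process_times(): the ordering shown in the hunk above. */
static void update_process_times_model(int user_tick)
{
	(void)user_tick;        /* user/system time accounting elided */
	irq_work_tick();        /* pending irq_work first (if in hard irq) */
	sched_tick();           /* then the scheduler's periodic tick */
	run_posix_cpu_timers(); /* then per-task CPU timers */
}

int main(void)
{
	update_process_times_model(1);
	return 0;
}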
@@ -1464,7 +1464,7 @@ void wq_worker_sleeping(struct task_struct *task)
  * wq_worker_tick - a scheduler tick occurred while a kworker is running
  * @task: task currently running
  *
- * Called from scheduler_tick(). We're in the IRQ context and the current
+ * Called from sched_tick(). We're in the IRQ context and the current
  * worker's fields which follow the 'K' locking rule can be accessed safely.
  */
 void wq_worker_tick(struct task_struct *task)
@@ -19,7 +19,7 @@ fail() { # mesg
 
 FILTER=set_ftrace_filter
 FUNC1="schedule"
-FUNC2="scheduler_tick"
+FUNC2="sched_tick"
 
 ALL_FUNCS="#### all functions enabled ####"
 