sched/balancing: Rename load_balance() => sched_balance_rq()
Standardize scheduler load-balancing function names on the sched_balance_() prefix.

Also, load_balance() has become somewhat of a misnomer: historically it was the first and primary load-balancing function that was called, but with the introduction of sched domains it has become a lower-layer function that balances runqueues. Rename it to sched_balance_rq() accordingly.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Link: https://lore.kernel.org/r/20240308111819.1101550-6-mingo@kernel.org
This commit is contained in:
parent 14ff4dbd34
commit 4c3e509ea9
@@ -41,11 +41,11 @@ The latter function takes two arguments: the runqueue of current CPU and whether
 the CPU was idle at the time the sched_tick() happened and iterates over all
 sched domains our CPU is on, starting from its base domain and going up the ->parent
 chain. While doing that, it checks to see if the current domain has exhausted its
-rebalance interval. If so, it runs load_balance() on that domain. It then checks
+rebalance interval. If so, it runs sched_balance_rq() on that domain. It then checks
 the parent sched_domain (if it exists), and the parent of the parent and so
 forth.

-Initially, load_balance() finds the busiest group in the current sched domain.
+Initially, sched_balance_rq() finds the busiest group in the current sched domain.
 If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in
 that group. If it manages to find such a runqueue, it locks both our initial
 CPU's runqueue and the newly found busiest one and starts moving tasks from it
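As an aside, the documentation hunk above describes the per-domain walk: start at the CPU's base domain, follow the ->parent chain, and run sched_balance_rq() on every domain whose rebalance interval has elapsed. Below is a minimal, self-contained C sketch of that walk; the type and helper names (sched_domain_sketch, jiffies_now(), balance_rq_sketch()) are illustrative stand-ins and not kernel APIs.

/* Sketch of the per-domain balancing walk described above.
 * All names here are illustrative stand-ins, not kernel APIs. */
#include <stdbool.h>
#include <stddef.h>

struct sched_domain_sketch {
	struct sched_domain_sketch *parent; /* next higher-level domain, or NULL */
	unsigned long last_balance;         /* time of the last balance attempt */
	unsigned long interval;             /* rebalance interval for this domain */
};

/* Hypothetical stand-ins for jiffies and sched_balance_rq(). */
extern unsigned long jiffies_now(void);
extern int balance_rq_sketch(int cpu, struct sched_domain_sketch *sd, bool idle);

static void balance_domains_sketch(int cpu, struct sched_domain_sketch *base, bool idle)
{
	struct sched_domain_sketch *sd;

	/* Walk from the base domain up the ->parent chain. */
	for (sd = base; sd; sd = sd->parent) {
		/* Only balance a domain whose rebalance interval has elapsed. */
		if (jiffies_now() - sd->last_balance >= sd->interval) {
			balance_rq_sketch(cpu, sd, idle);
			sd->last_balance = jiffies_now();
		}
	}
}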
@@ -77,53 +77,53 @@ domain<N> <cpumask> 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23

 The first field is a bit mask indicating what cpus this domain operates over.

-The next 24 are a variety of load_balance() statistics in grouped into types
+The next 24 are a variety of sched_balance_rq() statistics in grouped into types
 of idleness (idle, busy, and newly idle):

-    1) # of times in this domain load_balance() was called when the
+    1) # of times in this domain sched_balance_rq() was called when the
        cpu was idle
-    2) # of times in this domain load_balance() checked but found
+    2) # of times in this domain sched_balance_rq() checked but found
        the load did not require balancing when the cpu was idle
-    3) # of times in this domain load_balance() tried to move one or
+    3) # of times in this domain sched_balance_rq() tried to move one or
        more tasks and failed, when the cpu was idle
     4) sum of imbalances discovered (if any) with each call to
-       load_balance() in this domain when the cpu was idle
+       sched_balance_rq() in this domain when the cpu was idle
     5) # of times in this domain pull_task() was called when the cpu
        was idle
     6) # of times in this domain pull_task() was called even though
        the target task was cache-hot when idle
-    7) # of times in this domain load_balance() was called but did
+    7) # of times in this domain sched_balance_rq() was called but did
        not find a busier queue while the cpu was idle
     8) # of times in this domain a busier queue was found while the
        cpu was idle but no busier group was found
-    9) # of times in this domain load_balance() was called when the
+    9) # of times in this domain sched_balance_rq() was called when the
        cpu was busy
-   10) # of times in this domain load_balance() checked but found the
+   10) # of times in this domain sched_balance_rq() checked but found the
        load did not require balancing when busy
-   11) # of times in this domain load_balance() tried to move one or
+   11) # of times in this domain sched_balance_rq() tried to move one or
        more tasks and failed, when the cpu was busy
    12) sum of imbalances discovered (if any) with each call to
-       load_balance() in this domain when the cpu was busy
+       sched_balance_rq() in this domain when the cpu was busy
    13) # of times in this domain pull_task() was called when busy
    14) # of times in this domain pull_task() was called even though the
        target task was cache-hot when busy
-   15) # of times in this domain load_balance() was called but did not
+   15) # of times in this domain sched_balance_rq() was called but did not
        find a busier queue while the cpu was busy
    16) # of times in this domain a busier queue was found while the cpu
        was busy but no busier group was found

-   17) # of times in this domain load_balance() was called when the
+   17) # of times in this domain sched_balance_rq() was called when the
        cpu was just becoming idle
-   18) # of times in this domain load_balance() checked but found the
+   18) # of times in this domain sched_balance_rq() checked but found the
        load did not require balancing when the cpu was just becoming idle
-   19) # of times in this domain load_balance() tried to move one or more
+   19) # of times in this domain sched_balance_rq() tried to move one or more
        tasks and failed, when the cpu was just becoming idle
    20) sum of imbalances discovered (if any) with each call to
-       load_balance() in this domain when the cpu was just becoming idle
+       sched_balance_rq() in this domain when the cpu was just becoming idle
    21) # of times in this domain pull_task() was called when newly idle
    22) # of times in this domain pull_task() was called even though the
        target task was cache-hot when just becoming idle
-   23) # of times in this domain load_balance() was called but did not
+   23) # of times in this domain sched_balance_rq() was called but did not
        find a busier queue while the cpu was just becoming idle
    24) # of times in this domain a busier queue was found while the cpu
        was just becoming idle but no busier group was found
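The 24 per-domain counters listed above are read straight out of /proc/schedstat as whitespace-separated fields following the domain's cpumask. The hedged C sketch below reads a few of them (fields 1, 9 and 17 are the sched_balance_rq() call counts for the idle, busy and newly-idle cases per this documentation); the field positions are taken as an assumption from the numbering above, and schedstat layouts differ between versions, so treat the indices as illustrative rather than definitive.

/* Hedged sketch: parse "domain<N>" lines from /proc/schedstat and print the
 * per-domain balance-call counters for idle, busy and newly-idle CPUs.
 * Field positions follow the documentation above and are assumptions. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/schedstat", "r");
	char line[1024];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		char name[32], cpumask[64];
		unsigned long long v[36] = {0};
		int n;

		if (strncmp(line, "domain", 6) != 0)
			continue;
		/* domain<N> <cpumask> followed by the per-domain counters */
		n = sscanf(line,
			   "%31s %63s %llu %llu %llu %llu %llu %llu %llu %llu %llu "
			   "%llu %llu %llu %llu %llu %llu %llu %llu",
			   name, cpumask,
			   &v[1], &v[2], &v[3], &v[4], &v[5], &v[6], &v[7], &v[8],
			   &v[9], &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
			   &v[16], &v[17]);
		if (n < 19)
			continue;
		printf("%s: balance calls idle=%llu busy=%llu newidle=%llu\n",
		       name, v[1], v[9], v[17]);
	}
	fclose(f);
	return 0;
}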
@@ -42,9 +42,9 @@ shared by the CPUs. The intersection of the CPU masks of any two groups is not necessarily empty; if it is
 The latter function takes two arguments: the current CPU's runqueue and whether it was idle when sched_tick() was called. Starting from
 the base sched domain the current CPU belongs to, it iterates and walks up the parent pointer chain into higher-level sched domains. While
 iterating, it checks whether the current sched domain has exhausted its rebalance interval; if so, it runs
-load_balance() on that domain. It then checks the parent sched domain (if it exists), then the parent's parent, and so on.
+sched_balance_rq() on that domain. It then checks the parent sched domain (if it exists), then the parent's parent, and so on.

-Initially, load_balance() looks for the busiest scheduling group in the current sched domain. If it succeeds, it looks for the busiest
+Initially, sched_balance_rq() looks for the busiest scheduling group in the current sched domain. If it succeeds, it looks for the busiest
 runqueue among the runqueues of all the CPUs covered by that group. If such a runqueue can be found, it locks both the current CPU's
 runqueue and the newly found busiest runqueue and migrates tasks from the busiest queue to the current CPU. The number of tasks migrated
 equals the imbalance value of the domain's scheduling group computed in the earlier iteration.
@@ -75,42 +75,42 @@ domain<N> <cpumask> 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
 busy, and newly idle):


-    1) When the CPU was idle, load_balance() was called in this domain # of times
-    2) When the CPU was idle, load_balance() was called in this domain but found the load did not require
+    1) When the CPU was idle, sched_balance_rq() was called in this domain # of times
+    2) When the CPU was idle, sched_balance_rq() was called in this domain but found the load did not require
        balancing # of times
-    3) When the CPU was idle, load_balance() was called in this domain and tried to migrate one or more
+    3) When the CPU was idle, sched_balance_rq() was called in this domain and tried to migrate one or more
        tasks but failed # of times
-    4) When the CPU was idle, load_balance() was called in this domain and discovered an imbalance (if any)
+    4) When the CPU was idle, sched_balance_rq() was called in this domain and discovered an imbalance (if any)
        # of times
     5) When the CPU was idle, pull_task() was called in this domain # of times
     6) When the CPU was idle, pull_task() was still called # of times even though the target task was cache-hot
-    7) When the CPU was idle, load_balance() was called in this domain but could not find a busier
+    7) When the CPU was idle, sched_balance_rq() was called in this domain but could not find a busier
        queue # of times
     8) When the CPU was idle, a busier queue was found in the domain but no busier scheduling group was found
        # of times
-    9) When the CPU was busy, load_balance() was called in this domain # of times
-   10) When the CPU was busy, load_balance() was called in this domain but found the load did not require
+    9) When the CPU was busy, sched_balance_rq() was called in this domain # of times
+   10) When the CPU was busy, sched_balance_rq() was called in this domain but found the load did not require
        balancing # of times
-   11) When the CPU was busy, load_balance() was called in this domain and tried to migrate one or more
+   11) When the CPU was busy, sched_balance_rq() was called in this domain and tried to migrate one or more
        tasks but failed # of times
-   12) When the CPU was busy, load_balance() was called in this domain and discovered an imbalance (if any)
+   12) When the CPU was busy, sched_balance_rq() was called in this domain and discovered an imbalance (if any)
        # of times
    13) When the CPU was busy, pull_task() was called in this domain # of times
    14) When the CPU was busy, pull_task() was still called # of times even though the target task was cache-hot
-   15) When the CPU was busy, load_balance() was called in this domain but could not find a busier
+   15) When the CPU was busy, sched_balance_rq() was called in this domain but could not find a busier
        queue # of times
    16) When the CPU was busy, a busier queue was found in the domain but no busier scheduling group was found
        # of times
-   17) When the CPU was newly idle, load_balance() was called in this domain # of times
-   18) When the CPU was newly idle, load_balance() was called in this domain but found the load did not require
+   17) When the CPU was newly idle, sched_balance_rq() was called in this domain # of times
+   18) When the CPU was newly idle, sched_balance_rq() was called in this domain but found the load did not require
        balancing # of times
-   19) When the CPU was newly idle, load_balance() was called in this domain and tried to migrate one or more
+   19) When the CPU was newly idle, sched_balance_rq() was called in this domain and tried to migrate one or more
        tasks but failed # of times
-   20) When the CPU was newly idle, load_balance() was called in this domain and discovered an imbalance (if any)
+   20) When the CPU was newly idle, sched_balance_rq() was called in this domain and discovered an imbalance (if any)
        # of times
    21) When the CPU was newly idle, pull_task() was called in this domain # of times
    22) When the CPU was newly idle, pull_task() was still called # of times even though the target task was cache-hot
-   23) When the CPU was newly idle, load_balance() was called in this domain but could not find a busier
+   23) When the CPU was newly idle, sched_balance_rq() was called in this domain but could not find a busier
        queue # of times
    24) When the CPU was newly idle, a busier queue was found in the domain but no busier scheduling group was found
        # of times
@@ -110,7 +110,7 @@ struct sched_domain {
 	unsigned long last_decay_max_lb_cost;

 #ifdef CONFIG_SCHEDSTATS
-	/* load_balance() stats */
+	/* sched_balance_rq() stats */
 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
 	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
 	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
@@ -6866,7 +6866,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)

 #ifdef CONFIG_SMP

-/* Working cpumask for: load_balance, load_balance_newidle. */
+/* Working cpumask for: sched_balance_rq, load_balance_newidle. */
 static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
 static DEFINE_PER_CPU(cpumask_var_t, select_rq_mask);
 static DEFINE_PER_CPU(cpumask_var_t, should_we_balance_tmpmask);
@@ -11242,7 +11242,7 @@ static int should_we_balance(struct lb_env *env)
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
  */
-static int load_balance(int this_cpu, struct rq *this_rq,
+static int sched_balance_rq(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *continue_balancing)
 {
@@ -11647,7 +11647,7 @@ static int active_load_balance_cpu_stop(void *data)
 static atomic_t sched_balance_running = ATOMIC_INIT(0);

 /*
- * Scale the max load_balance interval with the number of CPUs in the system.
+ * Scale the max sched_balance_rq interval with the number of CPUs in the system.
  * This trades load-balance latency on larger machines for less cross talk.
  */
 void update_max_interval(void)
@@ -11727,7 +11727,7 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
 		}

 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
-			if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
+			if (sched_balance_rq(cpu, rq, sd, idle, &continue_balancing)) {
 				/*
 				 * The LBF_DST_PINNED logic could have changed
 				 * env->dst_cpu, so we can't know our idle
@@ -12353,7 +12353,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)

 		if (sd->flags & SD_BALANCE_NEWIDLE) {

-			pulled_task = load_balance(this_cpu, this_rq,
+			pulled_task = sched_balance_rq(this_cpu, this_rq,
 						   sd, CPU_NEWLY_IDLE,
 						   &continue_balancing);
