mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-01 10:45:49 +00:00
sched/fair: Merge for each idle cpu loop of ILB
Remove the specific case for handling this_cpu outside for_each_cpu() loop when running ILB. Instead we use for_each_cpu_wrap() and start with the next cpu after this_cpu so we will continue to finish with this_cpu. update_nohz_stats() is now used for this_cpu too and will prevent unnecessary updates. We don't need a special case for handling the update of nohz.next_balance for this_cpu anymore because it is now handled by the loop like others. Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Ingo Molnar <mingo@kernel.org> Reviewed-by: Valentin Schneider <valentin.schneider@arm.com> Link: https://lkml.kernel.org/r/20210224133007.28644-5-vincent.guittot@linaro.org
This commit is contained in:
parent
64f84f2735
commit
7a82e5f52a
@ -10043,22 +10043,9 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
|
||||
* When the cpu is attached to null domain for ex, it will not be
|
||||
* updated.
|
||||
*/
|
||||
if (likely(update_next_balance)) {
|
||||
if (likely(update_next_balance))
|
||||
rq->next_balance = next_balance;
|
||||
|
||||
#ifdef CONFIG_NO_HZ_COMMON
|
||||
/*
|
||||
* If this CPU has been elected to perform the nohz idle
|
||||
* balance. Other idle CPUs have already rebalanced with
|
||||
* nohz_idle_balance() and nohz.next_balance has been
|
||||
* updated accordingly. This CPU is now running the idle load
|
||||
* balance for itself and we need to update the
|
||||
* nohz.next_balance accordingly.
|
||||
*/
|
||||
if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
|
||||
nohz.next_balance = rq->next_balance;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
static inline int on_null_domain(struct rq *rq)
|
||||
@ -10385,8 +10372,12 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
|
||||
*/
|
||||
smp_mb();
|
||||
|
||||
for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
|
||||
if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
|
||||
/*
|
||||
* Start with the next CPU after this_cpu so we will end with this_cpu and let a
|
||||
* chance for other idle cpu to pull load.
|
||||
*/
|
||||
for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu+1) {
|
||||
if (!idle_cpu(balance_cpu))
|
||||
continue;
|
||||
|
||||
/*
|
||||
@ -10432,15 +10423,6 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
|
||||
if (likely(update_next_balance))
|
||||
nohz.next_balance = next_balance;
|
||||
|
||||
/* Newly idle CPU doesn't need an update */
|
||||
if (idle != CPU_NEWLY_IDLE) {
|
||||
update_blocked_averages(this_cpu);
|
||||
has_blocked_load |= this_rq->has_blocked_load;
|
||||
}
|
||||
|
||||
if (flags & NOHZ_BALANCE_KICK)
|
||||
rebalance_domains(this_rq, CPU_IDLE);
|
||||
|
||||
WRITE_ONCE(nohz.next_blocked,
|
||||
now + msecs_to_jiffies(LOAD_AVG_PERIOD));
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user