sched/fair: Rename cfs_rq.nr_running into nr_queued
Rename cfs_rq.nr_running into cfs_rq.nr_queued, which better reflects reality: the value counts both the ready-to-run tasks and the delayed-dequeue tasks.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Link: https://lore.kernel.org/r/20241202174606.4074512-10-vincent.guittot@linaro.org
This commit is contained in:
parent 43eef7c3a4
commit 736c55a02c
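As a quick illustration of the semantics the new name captures, here is a minimal stand-alone sketch. The types and helpers (toy_cfs_rq, entity, account_enqueue, account_dequeue) are made up for the example and are not the kernel's code; the point is only that nr_queued counts every entity the runqueue still holds, whether it is ready to run or merely parked by a delayed dequeue.

/* Illustrative sketch only: simplified stand-in types, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct entity {
	bool on_rq;         /* entity is still held by the runqueue        */
	bool sched_delayed; /* dequeue was requested but is being delayed  */
};

struct toy_cfs_rq {
	unsigned int nr_queued; /* ready-to-run + delayed-dequeue entities */
};

/* Every enqueued entity counts, delayed or not: hence "queued", not "running". */
static void account_enqueue(struct toy_cfs_rq *cfs_rq, struct entity *se)
{
	se->on_rq = true;
	cfs_rq->nr_queued++;
}

static void account_dequeue(struct toy_cfs_rq *cfs_rq, struct entity *se)
{
	se->on_rq = false;
	cfs_rq->nr_queued--;
}

int main(void)
{
	struct toy_cfs_rq cfs_rq = { 0 };
	struct entity a = { 0 }, b = { 0 };

	account_enqueue(&cfs_rq, &a);
	account_enqueue(&cfs_rq, &b);

	/* b asks to leave but its dequeue is delayed: it stays queued. */
	b.sched_delayed = true;
	printf("nr_queued = %u\n", cfs_rq.nr_queued); /* prints 2, not 1 */

	/* the delayed dequeue finally completes */
	account_dequeue(&cfs_rq, &b);
	printf("nr_queued = %u\n", cfs_rq.nr_queued); /* prints 1 */
	return 0;
}

Compiled with any C compiler, the example prints 2 while the delayed entity is still held and 1 once its dequeue completes.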
@@ -843,7 +843,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			SPLIT_NS(right_vruntime));
 	spread = right_vruntime - left_vruntime;
 	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
-	SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
+	SEQ_printf(m, " .%-30s: %d\n", "nr_queued", cfs_rq->nr_queued);
 	SEQ_printf(m, " .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable);
 	SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
 	SEQ_printf(m, " .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);
@@ -915,7 +915,7 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
 	 * We can safely skip eligibility check if there is only one entity
 	 * in this cfs_rq, saving some cycles.
 	 */
-	if (cfs_rq->nr_running == 1)
+	if (cfs_rq->nr_queued == 1)
 		return curr && curr->on_rq ? curr : se;
 
 	if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
@@ -1247,7 +1247,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 
 	account_cfs_rq_runtime(cfs_rq, delta_exec);
 
-	if (cfs_rq->nr_running == 1)
+	if (cfs_rq->nr_queued == 1)
 		return;
 
 	if (resched || did_preempt_short(cfs_rq, curr)) {
@@ -3673,7 +3673,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		list_add(&se->group_node, &rq->cfs_tasks);
 	}
 #endif
-	cfs_rq->nr_running++;
+	cfs_rq->nr_queued++;
 }
 
 static void
@@ -3686,7 +3686,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		list_del_init(&se->group_node);
 	}
 #endif
-	cfs_rq->nr_running--;
+	cfs_rq->nr_queued--;
 }
 
 /*
@@ -5220,7 +5220,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
 
 static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
 {
-	return !cfs_rq->nr_running;
+	return !cfs_rq->nr_queued;
 }
 
 #define UPDATE_TG	0x0
@@ -5276,7 +5276,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 *
 	 * EEVDF: placement strategy #1 / #2
 	 */
-	if (sched_feat(PLACE_LAG) && cfs_rq->nr_running && se->vlag) {
+	if (sched_feat(PLACE_LAG) && cfs_rq->nr_queued && se->vlag) {
 		struct sched_entity *curr = cfs_rq->curr;
 		unsigned long load;
 
@@ -5423,7 +5423,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
 
-	if (cfs_rq->nr_running == 1) {
+	if (cfs_rq->nr_queued == 1) {
 		check_enqueue_throttle(cfs_rq);
 		if (!throttled_hierarchy(cfs_rq)) {
 			list_add_leaf_cfs_rq(cfs_rq);
@@ -5565,7 +5565,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (flags & DEQUEUE_DELAYED)
 		finish_delayed_dequeue_entity(se);
 
-	if (cfs_rq->nr_running == 0)
+	if (cfs_rq->nr_queued == 0)
 		update_idle_cfs_rq_clock_pelt(cfs_rq);
 
 	return true;
@@ -5913,7 +5913,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
 		list_del_leaf_cfs_rq(cfs_rq);
 
 		SCHED_WARN_ON(cfs_rq->throttled_clock_self);
-		if (cfs_rq->nr_running)
+		if (cfs_rq->nr_queued)
 			cfs_rq->throttled_clock_self = rq_clock(rq);
 	}
 	cfs_rq->throttle_count++;
@@ -6022,7 +6022,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	 */
 	cfs_rq->throttled = 1;
 	SCHED_WARN_ON(cfs_rq->throttled_clock);
-	if (cfs_rq->nr_running)
+	if (cfs_rq->nr_queued)
 		cfs_rq->throttled_clock = rq_clock(rq);
 	return true;
 }
@@ -6122,7 +6122,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	assert_list_leaf_cfs_rq(rq);
 
 	/* Determine whether we need to wake up potentially idle CPU: */
-	if (rq->curr == rq->idle && rq->cfs.nr_running)
+	if (rq->curr == rq->idle && rq->cfs.nr_queued)
 		resched_curr(rq);
 }
 
@@ -6423,7 +6423,7 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 	if (!cfs_bandwidth_used())
 		return;
 
-	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
+	if (!cfs_rq->runtime_enabled || cfs_rq->nr_queued)
 		return;
 
 	__return_cfs_rq_runtime(cfs_rq);
@@ -6941,14 +6941,14 @@ requeue_delayed_entity(struct sched_entity *se)
 	if (sched_feat(DELAY_ZERO)) {
 		update_entity_lag(cfs_rq, se);
 		if (se->vlag > 0) {
-			cfs_rq->nr_running--;
+			cfs_rq->nr_queued--;
 			if (se != cfs_rq->curr)
 				__dequeue_entity(cfs_rq, se);
 			se->vlag = 0;
 			place_entity(cfs_rq, se, 0);
 			if (se != cfs_rq->curr)
 				__enqueue_entity(cfs_rq, se);
-			cfs_rq->nr_running++;
+			cfs_rq->nr_queued++;
 		}
 	}
 
@@ -8873,7 +8873,7 @@ static struct task_struct *pick_task_fair(struct rq *rq)
 
 again:
 	cfs_rq = &rq->cfs;
-	if (!cfs_rq->nr_running)
+	if (!cfs_rq->nr_queued)
 		return NULL;
 
 	do {
@@ -8990,7 +8990,7 @@ static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_stru
 
 static bool fair_server_has_tasks(struct sched_dl_entity *dl_se)
 {
-	return !!dl_se->rq->cfs.nr_running;
+	return !!dl_se->rq->cfs.nr_queued;
 }
 
 static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se)
@@ -9780,7 +9780,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
 		if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
 			update_tg_load_avg(cfs_rq);
 
-			if (cfs_rq->nr_running == 0)
+			if (cfs_rq->nr_queued == 0)
 				update_idle_cfs_rq_clock_pelt(cfs_rq);
 
 			if (cfs_rq == &rq->cfs)
@@ -12949,7 +12949,7 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
 	 * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check
 	 * if we need to give up the CPU.
 	 */
-	if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 &&
+	if (rq->core->core_forceidle_count && rq->cfs.nr_queued == 1 &&
 	    __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
 		resched_curr(rq);
 }
@@ -13093,7 +13093,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
 	if (!task_on_rq_queued(p))
 		return;
 
-	if (rq->cfs.nr_running == 1)
+	if (rq->cfs.nr_queued == 1)
 		return;
 
 	/*
@@ -645,7 +645,7 @@ struct balance_callback {
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
 	struct load_weight	load;
-	unsigned int		nr_running;
+	unsigned int		nr_queued;
 	unsigned int		h_nr_queued;       /* SCHED_{NORMAL,BATCH,IDLE} */
 	unsigned int		h_nr_runnable;     /* SCHED_{NORMAL,BATCH,IDLE} */
 	unsigned int		h_nr_idle;         /* SCHED_IDLE */
@@ -2565,7 +2565,7 @@ static inline bool sched_rt_runnable(struct rq *rq)
 
 static inline bool sched_fair_runnable(struct rq *rq)
 {
-	return rq->cfs.nr_running > 0;
+	return rq->cfs.nr_queued > 0;
 }
 
 extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);