Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
sched/fair: Rename cfs_rq.idle_h_nr_running into h_nr_idle
Use the same naming convention as the other counters whose names start with h_nr_* and rename idle_h_nr_running into h_nr_idle. The "running" part of the name is no longer accurate, since the counter also includes delayed-dequeue tasks.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Link: https://lore.kernel.org/r/20241202174606.4074512-8-vincent.guittot@linaro.org
parent 9216582b0b
commit 31898e7b87
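Before the diff, a minimal sketch (plain C, not kernel code; the toy_* names are made up for illustration) of what the three hierarchical counters mean after this rename, following the field comments in the struct cfs_rq hunk at the end of the diff:

/*
 * Toy model of the renamed counters; field meanings follow the
 * struct cfs_rq hunk below.
 */
struct toy_cfs_rq {
        unsigned int h_nr_queued;   /* everything queued, including delayed-dequeue tasks */
        unsigned int h_nr_runnable; /* queued entities that are actually runnable */
        unsigned int h_nr_idle;     /* queued SCHED_IDLE entities (was idle_h_nr_running) */
};

/* Mirrors the per-level accounting visible in the enqueue_task_fair() hunks. */
static void toy_account_enqueue(struct toy_cfs_rq *cfs_rq, int h_nr_runnable,
                                int h_nr_idle)
{
        cfs_rq->h_nr_runnable += h_nr_runnable;
        cfs_rq->h_nr_queued++;
        cfs_rq->h_nr_idle += h_nr_idle;
}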
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -848,8 +848,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued);
 	SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running",
 			cfs_rq->idle_nr_running);
-	SEQ_printf(m, " .%-30s: %d\n", "idle_h_nr_running",
-			cfs_rq->idle_h_nr_running);
+	SEQ_printf(m, " .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle);
 	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SMP
 	SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5930,7 +5930,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long queued_delta, runnable_delta, idle_task_delta, dequeue = 1;
+	long queued_delta, runnable_delta, idle_delta, dequeue = 1;
 	long rq_h_nr_queued = rq->cfs.h_nr_queued;
 
 	raw_spin_lock(&cfs_b->lock);
@@ -5963,7 +5963,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	queued_delta = cfs_rq->h_nr_queued;
 	runnable_delta = cfs_rq->h_nr_runnable;
-	idle_task_delta = cfs_rq->idle_h_nr_running;
+	idle_delta = cfs_rq->h_nr_idle;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		int flags;
@@ -5983,11 +5983,11 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		dequeue_entity(qcfs_rq, se, flags);
 
 		if (cfs_rq_is_idle(group_cfs_rq(se)))
-			idle_task_delta = cfs_rq->h_nr_queued;
+			idle_delta = cfs_rq->h_nr_queued;
 
 		qcfs_rq->h_nr_queued -= queued_delta;
 		qcfs_rq->h_nr_runnable -= runnable_delta;
-		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_idle -= idle_delta;
 
 		if (qcfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
@@ -6006,11 +6006,11 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 		se_update_runnable(se);
 
 		if (cfs_rq_is_idle(group_cfs_rq(se)))
-			idle_task_delta = cfs_rq->h_nr_queued;
+			idle_delta = cfs_rq->h_nr_queued;
 
 		qcfs_rq->h_nr_queued -= queued_delta;
 		qcfs_rq->h_nr_runnable -= runnable_delta;
-		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_idle -= idle_delta;
 	}
 
 	/* At this point se is NULL and we are at root level*/
@@ -6036,7 +6036,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long queued_delta, runnable_delta, idle_task_delta;
+	long queued_delta, runnable_delta, idle_delta;
 	long rq_h_nr_queued = rq->cfs.h_nr_queued;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6072,7 +6072,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	queued_delta = cfs_rq->h_nr_queued;
 	runnable_delta = cfs_rq->h_nr_runnable;
-	idle_task_delta = cfs_rq->idle_h_nr_running;
+	idle_delta = cfs_rq->h_nr_idle;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
@@ -6086,11 +6086,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP);
 
 		if (cfs_rq_is_idle(group_cfs_rq(se)))
-			idle_task_delta = cfs_rq->h_nr_queued;
+			idle_delta = cfs_rq->h_nr_queued;
 
 		qcfs_rq->h_nr_queued += queued_delta;
 		qcfs_rq->h_nr_runnable += runnable_delta;
-		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_idle += idle_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6104,11 +6104,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		se_update_runnable(se);
 
 		if (cfs_rq_is_idle(group_cfs_rq(se)))
-			idle_task_delta = cfs_rq->h_nr_queued;
+			idle_delta = cfs_rq->h_nr_queued;
 
 		qcfs_rq->h_nr_queued += queued_delta;
 		qcfs_rq->h_nr_runnable += runnable_delta;
-		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_idle += idle_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6918,7 +6918,7 @@ static inline void check_update_overutilized_status(struct rq *rq) { }
 /* Runqueue only has SCHED_IDLE tasks enqueued */
 static int sched_idle_rq(struct rq *rq)
 {
-	return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running &&
+	return unlikely(rq->nr_running == rq->cfs.h_nr_idle &&
 			rq->nr_running);
 }
 
@@ -6970,7 +6970,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
-	int idle_h_nr_running = task_has_idle_policy(p);
+	int h_nr_idle = task_has_idle_policy(p);
 	int h_nr_runnable = 1;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_queued = rq->cfs.h_nr_queued;
@@ -7023,10 +7023,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_runnable += h_nr_runnable;
 		cfs_rq->h_nr_queued++;
-		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_idle += h_nr_idle;
 
 		if (cfs_rq_is_idle(cfs_rq))
-			idle_h_nr_running = 1;
+			h_nr_idle = 1;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
@@ -7047,10 +7047,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_runnable += h_nr_runnable;
 		cfs_rq->h_nr_queued++;
-		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_idle += h_nr_idle;
 
 		if (cfs_rq_is_idle(cfs_rq))
-			idle_h_nr_running = 1;
+			h_nr_idle = 1;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
@@ -7108,7 +7108,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	bool task_sleep = flags & DEQUEUE_SLEEP;
 	bool task_delayed = flags & DEQUEUE_DELAYED;
 	struct task_struct *p = NULL;
-	int idle_h_nr_running = 0;
+	int h_nr_idle = 0;
 	int h_nr_queued = 0;
 	int h_nr_runnable = 0;
 	struct cfs_rq *cfs_rq;
@@ -7117,7 +7117,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	if (entity_is_task(se)) {
 		p = task_of(se);
 		h_nr_queued = 1;
-		idle_h_nr_running = task_has_idle_policy(p);
+		h_nr_idle = task_has_idle_policy(p);
 		if (task_sleep || task_delayed || !se->sched_delayed)
 			h_nr_runnable = 1;
 	} else {
@@ -7137,10 +7137,10 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_runnable -= h_nr_runnable;
 		cfs_rq->h_nr_queued -= h_nr_queued;
-		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_idle -= h_nr_idle;
 
 		if (cfs_rq_is_idle(cfs_rq))
-			idle_h_nr_running = h_nr_queued;
+			h_nr_idle = h_nr_queued;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
@@ -7176,10 +7176,10 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_runnable -= h_nr_runnable;
 		cfs_rq->h_nr_queued -= h_nr_queued;
-		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_idle -= h_nr_idle;
 
 		if (cfs_rq_is_idle(cfs_rq))
-			idle_h_nr_running = h_nr_queued;
+			h_nr_idle = h_nr_queued;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(cfs_rq))
@@ -13527,7 +13527,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
 		}
 
 		idle_task_delta = grp_cfs_rq->h_nr_queued -
-				  grp_cfs_rq->idle_h_nr_running;
+				  grp_cfs_rq->h_nr_idle;
 		if (!cfs_rq_is_idle(grp_cfs_rq))
 			idle_task_delta *= -1;
 
@@ -13537,7 +13537,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
 		if (!se->on_rq)
 			break;
 
-		cfs_rq->idle_h_nr_running += idle_task_delta;
+		cfs_rq->h_nr_idle += idle_task_delta;
 
 		/* Already accounted at parent level and above. */
 		if (cfs_rq_is_idle(cfs_rq))
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -649,7 +649,7 @@ struct cfs_rq {
 	unsigned int		h_nr_queued;       /* SCHED_{NORMAL,BATCH,IDLE} */
 	unsigned int		h_nr_runnable;     /* SCHED_{NORMAL,BATCH,IDLE} */
 	unsigned int		idle_nr_running;   /* SCHED_IDLE */
-	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
+	unsigned int		h_nr_idle;         /* SCHED_IDLE */
 
 	s64			avg_vruntime;
 	u64			avg_load;