sched/eevdf: More PELT vs DELAYED_DEQUEUE
Vincent and Dietmar noted that while commit fc1892becd ("sched/eevdf: Fixup PELT vs DELAYED_DEQUEUE") fixes the entity runnable stats, it does not adjust the cfs_rq runnable stats, which are based off of h_nr_running.

Track h_nr_delayed such that we can discount those and adjust the signal.

Fixes: fc1892becd ("sched/eevdf: Fixup PELT vs DELAYED_DEQUEUE")
Closes: https://lore.kernel.org/lkml/a9a45193-d0c6-4ba2-a822-464ad30b550e@arm.com/
Closes: https://lore.kernel.org/lkml/CAKfTPtCNUvWE_GX5LyvTF-WdxUT=ZgvZZv-4t=eWntg5uOFqiQ@mail.gmail.com/
Reported-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Reported-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
[ Fixes checkpatch warnings and rebased ]
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lore.kernel.org/r/20241202174606.4074512-3-vincent.guittot@linaro.org
parent c1f43c342e
commit 76f2f78329
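Before the diff itself, the gist of the change as a minimal standalone C sketch (toy types and names, not kernel code): a delayed-dequeue task stays enqueued, so h_nr_running still counts it; the new h_nr_delayed counter is what lets the runnable signal subtract such tasks back out.

    /*
     * Editor's sketch (toy types, not kernel code): a delayed-dequeue task
     * stays enqueued, so h_nr_running still counts it; the h_nr_delayed
     * counter added by this patch is what lets the runnable signal
     * discount it, as in the pelt.c hunk below.
     */
    #include <stdio.h>

    struct toy_cfs_rq {
            unsigned int h_nr_running;      /* all queued tasks, delayed included */
            unsigned int h_nr_delayed;      /* delayed-dequeue tasks among them */
    };

    /* Runnable contribution fed to load tracking after this patch. */
    static unsigned int toy_runnable(const struct toy_cfs_rq *cfs_rq)
    {
            return cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
    }

    int main(void)
    {
            struct toy_cfs_rq cfs_rq = { .h_nr_running = 3 };

            /* One task is dequeued while ineligible: it stays queued as
             * "delayed", so only the delayed counter moves. */
            cfs_rq.h_nr_delayed++;

            printf("queued %u, runnable %u\n",
                   cfs_rq.h_nr_running, toy_runnable(&cfs_rq)); /* queued 3, runnable 2 */
            return 0;
    }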
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -845,6 +845,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
+	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed);
 	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
 			cfs_rq->idle_nr_running);
 	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5465,9 +5465,33 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
 
+static void set_delayed(struct sched_entity *se)
+{
+	se->sched_delayed = 1;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed++;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
+static void clear_delayed(struct sched_entity *se)
+{
+	se->sched_delayed = 0;
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		cfs_rq->h_nr_delayed--;
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+}
+
 static inline void finish_delayed_dequeue_entity(struct sched_entity *se)
 {
-	se->sched_delayed = 0;
+	clear_delayed(se);
 	if (sched_feat(DELAY_ZERO) && se->vlag > 0)
 		se->vlag = 0;
 }
@@ -5496,7 +5520,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		if (sched_feat(DELAY_DEQUEUE) && delay &&
 		    !entity_eligible(cfs_rq, se)) {
 			update_load_avg(cfs_rq, se, 0);
-			se->sched_delayed = 1;
+			set_delayed(se);
 			return false;
 		}
 	}
@@ -5908,7 +5932,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta, dequeue = 1;
+	long task_delta, idle_task_delta, delayed_delta, dequeue = 1;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	raw_spin_lock(&cfs_b->lock);
@@ -5941,6 +5965,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 		int flags;
@@ -5964,6 +5989,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 
 		if (qcfs_rq->load.weight) {
 			/* Avoid re-evaluating load for this entity: */
@@ -5986,6 +6012,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running -= task_delta;
 		qcfs_rq->idle_h_nr_running -= idle_task_delta;
+		qcfs_rq->h_nr_delayed -= delayed_delta;
 	}
 
 	/* At this point se is NULL and we are at root level*/
@@ -6011,7 +6038,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 	struct rq *rq = rq_of(cfs_rq);
 	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
 	struct sched_entity *se;
-	long task_delta, idle_task_delta;
+	long task_delta, idle_task_delta, delayed_delta;
 	long rq_h_nr_running = rq->cfs.h_nr_running;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
@@ -6047,6 +6074,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 	task_delta = cfs_rq->h_nr_running;
 	idle_task_delta = cfs_rq->idle_h_nr_running;
+	delayed_delta = cfs_rq->h_nr_delayed;
 	for_each_sched_entity(se) {
 		struct cfs_rq *qcfs_rq = cfs_rq_of(se);
 
@@ -6064,6 +6092,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6081,6 +6110,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
 		qcfs_rq->h_nr_running += task_delta;
 		qcfs_rq->idle_h_nr_running += idle_task_delta;
+		qcfs_rq->h_nr_delayed += delayed_delta;
 
 		/* end evaluation on encountering a throttled cfs_rq */
 		if (cfs_rq_throttled(qcfs_rq))
@@ -6934,7 +6964,7 @@ requeue_delayed_entity(struct sched_entity *se)
 	}
 
 	update_load_avg(cfs_rq, se, 0);
-	se->sched_delayed = 0;
+	clear_delayed(se);
 }
 
 /*
@@ -6948,6 +6978,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
+	int h_nr_delayed = 0;
 	int task_new = !(flags & ENQUEUE_WAKEUP);
 	int rq_h_nr_running = rq->cfs.h_nr_running;
 	u64 slice = 0;
@@ -6974,6 +7005,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (p->in_iowait)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
+	if (task_new)
+		h_nr_delayed = !!se->sched_delayed;
+
 	for_each_sched_entity(se) {
 		if (se->on_rq) {
 			if (se->sched_delayed)
@@ -6996,6 +7030,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7019,6 +7054,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		cfs_rq->h_nr_running++;
 		cfs_rq->idle_h_nr_running += idle_h_nr_running;
+		cfs_rq->h_nr_delayed += h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = 1;
@@ -7081,6 +7117,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 	struct task_struct *p = NULL;
 	int idle_h_nr_running = 0;
 	int h_nr_running = 0;
+	int h_nr_delayed = 0;
 	struct cfs_rq *cfs_rq;
 	u64 slice = 0;
 
@@ -7088,6 +7125,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 		p = task_of(se);
 		h_nr_running = 1;
 		idle_h_nr_running = task_has_idle_policy(p);
+		if (!task_sleep && !task_delayed)
+			h_nr_delayed = !!se->sched_delayed;
 	} else {
 		cfs_rq = group_cfs_rq(se);
 		slice = cfs_rq_min_slice(cfs_rq);
@@ -7105,6 +7144,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;
@@ -7143,6 +7183,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
 
 		cfs_rq->h_nr_running -= h_nr_running;
 		cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+		cfs_rq->h_nr_delayed -= h_nr_delayed;
 
 		if (cfs_rq_is_idle(cfs_rq))
 			idle_h_nr_running = h_nr_running;
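A note on the set_delayed()/clear_delayed() walk added above: the counter propagates up the cfs_rq hierarchy, but the walk stops at a throttled cfs_rq, because throttle_cfs_rq()/unthrottle_cfs_rq() carry the aggregate deltas across that boundary themselves. A minimal standalone sketch of that stop-at-throttled behaviour (toy types and a flattened parent chain, not the kernel's):

    /*
     * Editor's sketch (toy types, not kernel code) of the stop-at-throttled
     * rule in set_delayed()/clear_delayed(): levels above a throttled
     * cfs_rq are not touched, since the throttle/unthrottle paths move the
     * aggregate deltas across that boundary themselves.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_cfs_rq {
            struct toy_cfs_rq *parent;      /* stand-in for the se hierarchy walk */
            bool throttled;
            unsigned int h_nr_delayed;
    };

    static void toy_set_delayed(struct toy_cfs_rq *cfs_rq)
    {
            for (; cfs_rq; cfs_rq = cfs_rq->parent) {
                    cfs_rq->h_nr_delayed++;
                    if (cfs_rq->throttled) /* higher levels already discounted */
                            break;
            }
    }

    int main(void)
    {
            struct toy_cfs_rq root = { 0 };
            struct toy_cfs_rq group = { .parent = &root };

            toy_set_delayed(&group);  /* propagates: group 1, root 1 */
            group.throttled = true;
            toy_set_delayed(&group);  /* stops at group: group 2, root 1 */

            printf("group %u, root %u\n", group.h_nr_delayed, root.h_nr_delayed);
            return 0;
    }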
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
 {
 	if (___update_load_sum(now, &cfs_rq->avg,
 				scale_load_down(cfs_rq->load.weight),
-				cfs_rq->h_nr_running,
+				cfs_rq->h_nr_running - cfs_rq->h_nr_delayed,
 				cfs_rq->curr != NULL)) {
 
 		___update_load_avg(&cfs_rq->avg, 1);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -649,6 +649,7 @@ struct cfs_rq {
 	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
 	unsigned int		idle_nr_running;   /* SCHED_IDLE */
 	unsigned int		idle_h_nr_running; /* SCHED_IDLE */
+	unsigned int		h_nr_delayed;
 
 	s64			avg_vruntime;
 	u64			avg_load;
@@ -898,8 +899,11 @@ struct dl_rq {
 
 static inline void se_update_runnable(struct sched_entity *se)
 {
-	if (!entity_is_task(se))
-		se->runnable_weight = se->my_q->h_nr_running;
+	if (!entity_is_task(se)) {
+		struct cfs_rq *cfs_rq = se->my_q;
+
+		se->runnable_weight = cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
+	}
 }
 
 static inline long se_runnable(struct sched_entity *se)
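For the se_update_runnable() change just above: a group entity's runnable_weight now excludes the delayed tasks queued below it, matching the discounted signal fed to __update_load_avg_cfs_rq(). A minimal standalone sketch (toy types, not the kernel's):

    /*
     * Editor's sketch (toy types, not kernel code) of the
     * se_update_runnable() change: a group entity's runnable weight now
     * excludes the delayed tasks queued below it.
     */
    #include <stdio.h>

    struct toy_cfs_rq {
            unsigned int h_nr_running;
            unsigned int h_nr_delayed;
    };

    struct toy_se {
            struct toy_cfs_rq *my_q;  /* NULL for a task entity */
            long runnable_weight;
    };

    static void toy_se_update_runnable(struct toy_se *se)
    {
            if (se->my_q) {           /* group entity, cf. !entity_is_task() */
                    struct toy_cfs_rq *cfs_rq = se->my_q;

                    se->runnable_weight = cfs_rq->h_nr_running - cfs_rq->h_nr_delayed;
            }
    }

    int main(void)
    {
            struct toy_cfs_rq q = { .h_nr_running = 4, .h_nr_delayed = 1 };
            struct toy_se se = { .my_q = &q };

            toy_se_update_runnable(&se);
            printf("runnable_weight %ld\n", se.runnable_weight); /* 3 */
            return 0;
    }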