sched: Split up put_prev_task_balance()

With the goal of pushing put_prev_task() after pick_task() / into
pick_next_task().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240813224015.943143811@infradead.org
Author: Peter Zijlstra
Date:   2024-08-14 00:25:52 +02:00
parent 4686cc598f
commit 260598f142

kernel/sched/core.c

@@ -5841,8 +5841,8 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
 	schedstat_inc(this_rq()->sched_count);
 }
 
-static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
-				  struct rq_flags *rf)
+static void prev_balance(struct rq *rq, struct task_struct *prev,
+			 struct rq_flags *rf)
 {
 #ifdef CONFIG_SMP
 	const struct sched_class *class;
@@ -5860,8 +5860,6 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
 	}
 #endif
 
-	put_prev_task(rq, prev);
-
 	/*
 	 * We've updated @prev and no longer need the server link, clear it.
 	 * Must be done before ->pick_next_task() because that can (re)set
@@ -5917,7 +5915,8 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	}
 
 restart:
-	put_prev_task_balance(rq, prev, rf);
+	prev_balance(rq, prev, rf);
+	put_prev_task(rq, prev);
 
 	for_each_class(class) {
 		p = class->pick_next_task(rq);
@@ -6017,7 +6016,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		goto out;
 	}
 
-	put_prev_task_balance(rq, prev, rf);
+	prev_balance(rq, prev, rf);
+	put_prev_task(rq, prev);
 
 	smt_mask = cpu_smt_mask(cpu);
 	need_sync = !!rq->core->core_cookie;
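
For reference, below is a small compilable toy model of the call pattern this change creates: prev_balance() performs only the balance pass, and the caller sequences put_prev_task() itself before picking the next task. The struct definitions, stub class, and main() are invented stand-ins for illustration only, not the kernel's real types or API.

/*
 * Toy model of the post-commit call pattern: the balance pass
 * (prev_balance) and put_prev_task() are separate steps that the
 * pick path sequences explicitly. All types and stubs below are
 * simplified stand-ins, not the kernel's real definitions.
 */
#include <stddef.h>
#include <stdio.h>

struct rq;
struct task_struct { const char *comm; };
struct rq_flags { int dummy; };

struct sched_class {
	/* Return non-zero once runnable work exists at this priority. */
	int (*balance)(struct rq *rq, struct task_struct *prev,
		       struct rq_flags *rf);
	struct task_struct *(*pick_next_task)(struct rq *rq);
};

struct rq {
	const struct sched_class *classes;	/* highest priority first */
	size_t nr_classes;
};

/* Balance pass only -- it no longer calls put_prev_task() itself. */
static void prev_balance(struct rq *rq, struct task_struct *prev,
			 struct rq_flags *rf)
{
	for (size_t i = 0; i < rq->nr_classes; i++) {
		if (rq->classes[i].balance(rq, prev, rf))
			break;
	}
}

/* Stand-in for the kernel's put_prev_task(). */
static void put_prev_task(struct rq *rq, struct task_struct *prev)
{
	(void)rq;
	printf("put_prev_task(%s)\n", prev->comm);
}

/* The caller now runs the two steps explicitly before picking. */
static struct task_struct *__pick_next_task(struct rq *rq,
					    struct task_struct *prev,
					    struct rq_flags *rf)
{
	prev_balance(rq, prev, rf);
	put_prev_task(rq, prev);

	for (size_t i = 0; i < rq->nr_classes; i++) {
		struct task_struct *p = rq->classes[i].pick_next_task(rq);
		if (p)
			return p;
	}
	return NULL;
}

/* Minimal stubs so the model runs end to end. */
static int stub_balance(struct rq *rq, struct task_struct *prev,
			struct rq_flags *rf)
{
	(void)rq; (void)prev; (void)rf;
	return 1;
}

static struct task_struct stub_next = { .comm = "next" };

static struct task_struct *stub_pick(struct rq *rq)
{
	(void)rq;
	return &stub_next;
}

int main(void)
{
	const struct sched_class classes[] = {
		{ .balance = stub_balance, .pick_next_task = stub_pick },
	};
	struct task_struct prev = { .comm = "prev" };
	struct rq rq = { .classes = classes, .nr_classes = 1 };
	struct rq_flags rf = { 0 };

	struct task_struct *next = __pick_next_task(&rq, &prev, &rf);
	printf("picked %s\n", next ? next->comm : "(none)");
	return 0;
}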