mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
sched/core: Optimize pick_next_task()
Ever since we moved the sched_class definitions into their own files, the
constant expression {fair,idle}_sched_class.pick_next_task() is not in fact
a compile time constant anymore and results in an indirect call (barring
LTO).

Fix that by exposing pick_next_task_{fair,idle}() directly; this gets rid
of the indirect call (and RETPOLINE) on the fast path.

Also remove the unlikely() from the idle case: it is in fact /the/ way we
select idle -- and that is a very common thing to do.

Performance for will-it-scale/sched_yield improves by 2% (as reported by
0-day).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bsegall@google.com
Cc: dietmar.eggemann@arm.com
Cc: juri.lelli@redhat.com
Cc: ktkhai@virtuozzo.com
Cc: mgorman@suse.de
Cc: qais.yousef@arm.com
Cc: qperret@google.com
Cc: rostedt@goodmis.org
Cc: valentin.schneider@arm.com
Cc: vincent.guittot@linaro.org
Link: https://lkml.kernel.org/r/20191108131909.603037345@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
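As a rough illustration of the indirect-vs-direct distinction described above, here is a minimal, self-contained C sketch. It is not kernel code: my_class, fair_class_sketch and pick_next_task_fair_sketch are hypothetical stand-ins for struct sched_class, fair_sched_class and pick_next_task_fair(). The point is that once the class instance is defined in another translation unit, the compiler can no longer fold the function-pointer member into a constant, so the struct-member form becomes an indirect call while the exposed function stays a plain direct call.

  /* sketch.c -- hypothetical stand-ins, not the kernel's types or functions */
  #include <stdio.h>

  struct my_task { int id; };

  struct my_class {
          struct my_task *(*pick_next_task)(void);  /* function-pointer member */
  };

  static struct my_task the_task = { .id = 1 };

  /* In the kernel the class instance is defined in a different .c file,
   * so the call site below only sees an extern struct and must load the
   * pointer at run time. */
  struct my_task *pick_next_task_fair_sketch(void)
  {
          return &the_task;
  }

  const struct my_class fair_class_sketch = {
          .pick_next_task = pick_next_task_fair_sketch,
  };

  int main(void)
  {
          /* Call through the struct member; in the kernel, where the instance
           * lives in another translation unit, this cannot be folded and
           * becomes an indirect call (and a retpoline thunk when enabled). */
          struct my_task *a = fair_class_sketch.pick_next_task();

          /* Call the exposed function directly: a plain call to a known
           * symbol, which is what the patch switches the fast path to. */
          struct my_task *b = pick_next_task_fair_sketch();

          printf("%d %d\n", a->id, b->id);
          return 0;
  }

Under CONFIG_RETPOLINE the indirect form is additionally routed through a retpoline thunk, which is why the direct call also removes that overhead on the fast path, as the message notes.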
parent f488e1057b
commit 5d7d605642
kernel/sched/core.c
@@ -3917,14 +3917,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		    prev->sched_class == &fair_sched_class) &&
 		   rq->nr_running == rq->cfs.h_nr_running)) {
 
-		p = fair_sched_class.pick_next_task(rq, prev, rf);
+		p = pick_next_task_fair(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
 			goto restart;
 
 		/* Assumes fair_sched_class->next == idle_sched_class */
-		if (unlikely(!p)) {
+		if (!p) {
 			put_prev_task(rq, prev);
-			p = idle_sched_class.pick_next_task(rq, NULL, NULL);
+			p = pick_next_task_idle(rq, NULL, NULL);
 		}
 
 		return p;
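A side note on the unlikely() this hunk drops: it is a branch-layout hint, and removing it stops the compiler from treating the idle pick as a cold path. A minimal sketch of such a hint follows; sketch_unlikely is a hypothetical stand-in, and the kernel's own unlikely() macro likewise wraps __builtin_expect().

  #define sketch_unlikely(x)	__builtin_expect(!!(x), 0)

  /* With the hint, the compiler lays out the !p branch as cold; the patch
   * removes the hint in pick_next_task() because picking idle is in fact
   * a very common outcome. */
  int pick_sketch(const void *p)
  {
          if (sketch_unlikely(!p))
                  return -1;
          return 0;
  }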
kernel/sched/fair.c
@@ -6611,7 +6611,7 @@ preempt:
 		set_last_buddy(se);
 }
 
-static struct task_struct *
+struct task_struct *
 pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct cfs_rq *cfs_rq = &rq->cfs;
kernel/sched/idle.c
@@ -391,7 +391,7 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next)
 	schedstat_inc(rq->sched_goidle);
 }
 
-static struct task_struct *
+struct task_struct *
 pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct task_struct *next = rq->idle;
kernel/sched/sched.h
@@ -1821,6 +1821,9 @@ static inline bool sched_fair_runnable(struct rq *rq)
 	return rq->cfs.nr_running > 0;
 }
 
+extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
+extern struct task_struct *pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
+
 #ifdef CONFIG_SMP
 
 extern void update_group_capacity(struct sched_domain *sd, int cpu);