Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes, most of them SCHED_DEADLINE fallout"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/deadline: Prevent rt_time growth to infinity
  sched/deadline: Switch CPU's presence test order
  sched/deadline: Cleanup RT leftovers from {inc/dec}_dl_migration
  sched: Fix double normalization of vruntime
commit 0c0bd34a14
kernel/sched/cpudeadline.c
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
 
 static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
 {
-	WARN_ON(!cpu_present(idx) || idx == IDX_INVALID);
+	WARN_ON(idx == IDX_INVALID || !cpu_present(idx));
 
 	if (dl_time_before(new_dl, cp->elements[idx].dl)) {
 		cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 	}
 
 out:
-	WARN_ON(!cpu_present(best_cpu) && best_cpu != -1);
+	WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
 
 	return best_cpu;
 }
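Both WARN_ON() changes above only swap the order of the operands: C evaluates || and && left to right and short-circuits, so testing idx == IDX_INVALID (or best_cpu != -1) first guarantees cpu_present() is never reached with an out-of-range index. A minimal user-space sketch of the same pattern; my_cpu_present(), MY_IDX_INVALID and MY_NR_CPUS are illustrative stand-ins, not the kernel's definitions:

#include <assert.h>
#include <stdbool.h>

#define MY_IDX_INVALID	-1
#define MY_NR_CPUS	4

static bool my_cpu_present(int cpu)
{
	/* Pretend CPUs 0..3 exist; calling this with -1 would be a bug. */
	assert(cpu >= 0 && cpu < MY_NR_CPUS);
	return true;
}

static void check(int idx)
{
	/*
	 * Short-circuit order matters: if idx is invalid, the || stops
	 * before my_cpu_present(idx) is ever called, mirroring
	 * WARN_ON(idx == IDX_INVALID || !cpu_present(idx)).
	 */
	if (idx == MY_IDX_INVALID || !my_cpu_present(idx))
		return;	/* the kernel would WARN here and carry on */
}

int main(void)
{
	check(MY_IDX_INVALID);	/* safe: my_cpu_present() is skipped */
	check(2);		/* present CPU, nothing to report */
	return 0;
}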
kernel/sched/deadline.c
@@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq)
 static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory++;
@@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
 	struct task_struct *p = dl_task_of(dl_se);
-	dl_rq = &rq_of_dl_rq(dl_rq)->dl;
 
 	if (p->nr_cpus_allowed > 1)
 		dl_rq->dl_nr_migratory--;
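The statement removed from both helpers above is a leftover from the RT code they were modelled on: with RT group scheduling, the RT counterparts hop from a possibly per-group rt_rq to the root runqueue's rt_rq, but SCHED_DEADLINE has no group scheduling, so the dl_rq handed in already is the runqueue's own and the reassignment just re-derives the same pointer. A rough user-space sketch of that round-trip; the toy_* names and rq_of() helper are simplified stand-ins for the kernel's container_of()-based rq_of_dl_rq(), not its actual definitions:

#include <assert.h>
#include <stddef.h>

/* Simplified shapes: each rq embeds exactly one dl_rq (no groups for deadline). */
struct toy_dl_rq { int dl_nr_migratory; };
struct toy_rq    { struct toy_dl_rq dl; };

/* Like rq_of_dl_rq(): recover the enclosing rq from its embedded dl_rq. */
static struct toy_rq *rq_of(struct toy_dl_rq *dl_rq)
{
	return (struct toy_rq *)((char *)dl_rq - offsetof(struct toy_rq, dl));
}

int main(void)
{
	struct toy_rq rq = { .dl = { .dl_nr_migratory = 0 } };
	struct toy_dl_rq *dl_rq = &rq.dl;

	/* The removed statement round-trips to the very same pointer. */
	assert(&rq_of(dl_rq)->dl == dl_rq);
	return 0;
}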
@@ -564,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
 	return 1;
 }
 
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
 /*
  * Update the current task's runtime statistics (provided it is still
  * a -deadline task and has not been removed from the dl_rq).
@@ -627,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
 		struct rt_rq *rt_rq = &rq->rt;
 
 		raw_spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
 		/*
 		 * We'll let actual RT tasks worry about the overflow here, we
-		 * have our own CBS to keep us inline -- see above.
+		 * have our own CBS to keep us inline; only account when RT
+		 * bandwidth is relevant.
 		 */
+		if (sched_rt_bandwidth_account(rt_rq))
+			rt_rq->rt_time += delta_exec;
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
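The reworded comment above carries the reasoning: deadline tasks are already policed by their own CBS, so their runtime is mirrored into rt_rq->rt_time only while RT bandwidth enforcement can actually consume it, i.e. while the period timer is active or rt_time is still below rt_runtime; otherwise nothing would ever replenish that budget and rt_time would grow without bound, later throttling plain RT tasks. A simplified user-space sketch of that guard; the fake_* structures and numbers are illustrative, only the condition mirrors the sched_rt_bandwidth_account() added by this merge:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for a few struct rt_rq / struct rt_bandwidth fields. */
struct fake_rt_bw { bool period_timer_active; unsigned long long rt_runtime; };
struct fake_rt_rq { unsigned long long rt_time; struct fake_rt_bw *bw; };

/* Same condition as sched_rt_bandwidth_account(): is anyone using/refilling rt_time? */
static bool fake_rt_bandwidth_account(struct fake_rt_rq *rt_rq)
{
	return rt_rq->bw->period_timer_active ||
	       rt_rq->rt_time < rt_rq->bw->rt_runtime;
}

/* Mirrors the guarded accounting now done in update_curr_dl(). */
static void fake_update_curr_dl(struct fake_rt_rq *rt_rq, unsigned long long delta_exec)
{
	if (fake_rt_bandwidth_account(rt_rq))
		rt_rq->rt_time += delta_exec;
	/* else: nobody will ever replenish rt_time, so don't let it grow */
}

int main(void)
{
	struct fake_rt_bw bw = { .period_timer_active = false, .rt_runtime = 950000 };
	struct fake_rt_rq rt_rq = { .rt_time = 950000, .bw = &bw };

	fake_update_curr_dl(&rt_rq, 10000);	/* budget exhausted, timer idle: not accounted */
	printf("rt_time = %llu\n", rt_rq.rt_time);	/* stays at 950000 */
	return 0;
}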
kernel/sched/fair.c
@@ -7001,15 +7001,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 	/*
-	 * Ensure the task's vruntime is normalized, so that when its
+	 * Ensure the task's vruntime is normalized, so that when it's
 	 * switched back to the fair class the enqueue_entity(.flags=0) will
 	 * do the right thing.
 	 *
-	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
-	 * have normalized the vruntime, if it was !on_rq, then only when
+	 * If it's on_rq, then the dequeue_entity(.flags=0) will already
+	 * have normalized the vruntime, if it's !on_rq, then only when
 	 * the task is sleeping will it still have non-normalized vruntime.
 	 */
-	if (!se->on_rq && p->state != TASK_RUNNING) {
+	if (!p->on_rq && p->state != TASK_RUNNING) {
 		/*
 		 * Fix up our vruntime so that the current sleep doesn't
 		 * cause 'unlimited' sleep bonus.
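The one functional change in this hunk is the flag being tested: se->on_rq has already been cleared by dequeue_entity() by the time a class change reaches switched_from_fair(), whereas the task-level p->on_rq is still set, so checking p->on_rq distinguishes "dequeued for the class change, vruntime already normalized" from "genuinely sleeping, normalize it here" and avoids subtracting min_vruntime twice. A toy user-space model of that normalize-once invariant; all toy_* names and values are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* vruntime must have min_vruntime subtracted exactly once while off the fair rq. */
struct toy_task {
	bool task_on_rq;	/* like p->on_rq: still set across the class-change dequeue */
	bool se_on_rq;		/* like se->on_rq: cleared by dequeue_entity() */
	bool running;		/* stand-in for p->state == TASK_RUNNING */
	long vruntime;
};

static const long min_vruntime = 1000;

static void toy_dequeue_entity(struct toy_task *p)
{
	p->se_on_rq = false;
	p->vruntime -= min_vruntime;	/* first (and only correct) normalization */
}

static void toy_switched_from_fair(struct toy_task *p)
{
	/*
	 * Fixed check: use the task-level flag; the old, buggy check looked at
	 * the already-cleared se_on_rq and normalized a second time.
	 */
	if (!p->task_on_rq && !p->running)
		p->vruntime -= min_vruntime;
}

int main(void)
{
	/* Task did set_current_state(TASK_INTERRUPTIBLE) but hasn't slept yet. */
	struct toy_task p = { .task_on_rq = true, .se_on_rq = true,
			      .running = false, .vruntime = 1500 };

	toy_dequeue_entity(&p);		/* class change dequeues it from fair */
	toy_switched_from_fair(&p);	/* must not normalize again */

	printf("vruntime = %ld\n", p.vruntime);	/* 500, not -500 */
	return 0;
}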
kernel/sched/rt.c
@@ -538,6 +538,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
+{
+	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+	return (hrtimer_active(&rt_b->rt_period_timer) ||
+		rt_rq->rt_time < rt_b->rt_runtime);
+}
+
 #ifdef CONFIG_SMP
 /*
  * We ran out of runtime, see if we can borrow some from our neighbours.