Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2025-01-11 07:30:16 +00:00)
sched/autogroup: Fix failure to set cpu.rt_runtime_us
Because task_group() uses a cache of autogroup_task_group(), whose output depends on sched_class, switching classes can generate problems.

In particular, when started as fair, the cache points to the autogroup, so when switching to RT the tg_rt_schedulable() test fails for every cpu.rt_{runtime,period}_us change because now the autogroup has tasks and no runtime.

Furthermore, going back to the previous semantics of varying task_group() with sched_class has the down-side that the sched_debug output varies as well, even though the task really is in the autogroup.

Therefore add an autogroup exception to tg_has_rt_tasks() -- such that both (all) task_group() usages in sched/core now have one. And remove all the remnants of the variable task_group() output.

Reported-by: Zefan Li <lizefan@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Stefan Bader <stefan.bader@canonical.com>
Fixes: 8323f26ce342 ("sched: Fix race in task_group()")
Link: http://lkml.kernel.org/r/20150209112237.GR5029@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6f1607f1bd
commit 1fe89e1b6d
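The failure mode described above is easy to trigger from user space. The following is a minimal reproduction sketch, not part of the commit: it assumes a pre-fix kernel with CONFIG_SCHED_AUTOGROUP enabled, root privileges, and an existing cpu-controller cgroup at /sys/fs/cgroup/cpu/test -- the path and group name are illustrative assumptions. Once the task (started as fair, hence autogrouped) switches itself to SCHED_FIFO, the cpu.rt_runtime_us write is expected to fail with EBUSY.

/* Reproduction sketch (assumptions: pre-fix kernel, autogroups enabled,
 * root, and an existing cgroup at /sys/fs/cgroup/cpu/test). */
#include <errno.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 1 };
	const char buf[] = "950000\n";
	int fd;

	/* Started as SCHED_OTHER, so the cached task_group() is the autogroup;
	 * switching to RT does not update that cache. */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) < 0)
		perror("sched_setscheduler");

	/* Pre-fix, tg_rt_schedulable() now sees an RT task in a group with no
	 * RT runtime, so any cpu.rt_{runtime,period}_us change is rejected. */
	fd = open("/sys/fs/cgroup/cpu/test/cpu.rt_runtime_us", O_WRONLY);
	if (fd < 0 || write(fd, buf, sizeof(buf) - 1) < 0)
		fprintf(stderr, "cpu.rt_runtime_us: %s\n", strerror(errno));
	if (fd >= 0)
		close(fd);
	return 0;
}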
kernel/sched/auto_group.c
@@ -87,8 +87,7 @@ static inline struct autogroup *autogroup_create(void)
 	 * so we don't have to move tasks around upon policy change,
 	 * or flail around trying to allocate bandwidth on the fly.
 	 * A bandwidth exception in __sched_setscheduler() allows
-	 * the policy change to proceed. Thereafter, task_group()
-	 * returns &root_task_group, so zero bandwidth is required.
+	 * the policy change to proceed.
 	 */
 	free_rt_sched_group(tg);
 	tg->rt_se = root_task_group.rt_se;
@@ -115,9 +114,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 	if (tg != &root_task_group)
 		return false;
 
-	if (p->sched_class != &fair_sched_class)
-		return false;
-
 	/*
 	 * We can only assume the task group can't go away on us if
 	 * autogroup_move_group() can see us on ->thread_group list.
kernel/sched/core.c
@@ -7577,6 +7577,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
 	struct task_struct *g, *p;
 
+	/*
+	 * Autogroups do not have RT tasks; see autogroup_create().
+	 */
+	if (task_group_is_autogroup(tg))
+		return 0;
+
 	for_each_process_thread(g, p) {
 		if (rt_task(p) && task_group(p) == tg)
 			return 1;
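To illustrate why the single autogroup check is sufficient, here is a small self-contained model -- not kernel code: the type, the helper names, and the bare -16 standing in for -EBUSY are illustrative stand-ins for the task_group machinery described above. It mimics the schedulability guard that rejects any group reporting RT tasks while holding zero RT runtime, first with the pre-fix tg_has_rt_tasks() behaviour and then with the autogroup exception added by this commit.

/* Toy model of the failing guard; names and types are illustrative. */
#include <stdbool.h>
#include <stdio.h>

struct group_model {
	const char *name;
	bool is_autogroup;
	bool has_rt_task;		/* what the pre-fix walk observed */
	unsigned long long rt_runtime;	/* autogroups never get RT runtime */
};

/* Pre-fix: autogroups are not exempted from the RT-task check. */
static bool has_rt_tasks_prefix(const struct group_model *g)
{
	return g->has_rt_task;
}

/* Post-fix: autogroups never report RT tasks (see the hunk above). */
static bool has_rt_tasks_fixed(const struct group_model *g)
{
	return g->is_autogroup ? false : g->has_rt_task;
}

/* Sketch of the guard: zero runtime plus RT tasks means the whole
 * cpu.rt_{runtime,period}_us change is rejected. */
static int rt_schedulable_model(const struct group_model *g,
				bool (*has_rt)(const struct group_model *))
{
	return (!g->rt_runtime && has_rt(g)) ? -16 /* -EBUSY */ : 0;
}

int main(void)
{
	/* A task that started fair and was switched to RT: its cached group
	 * is still the autogroup, which has no RT runtime. */
	struct group_model ag = {
		.name = "autogroup", .is_autogroup = true,
		.has_rt_task = true, .rt_runtime = 0,
	};

	printf("%s pre-fix:  %d\n", ag.name, rt_schedulable_model(&ag, has_rt_tasks_prefix));
	printf("%s post-fix: %d\n", ag.name, rt_schedulable_model(&ag, has_rt_tasks_fixed));
	return 0;
}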