mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-28 16:52:18 +00:00
sched,arm64: Handle CPU isolation on last resort fallback rq selection
When a kthread or any other task has an affinity mask that is fully offline or unallowed, the scheduler reaffines the task to all possible CPUs as a last resort. This default decision doesn't interact well with nohz_full CPUs: they are part of the possible cpumask, but they don't want to be disturbed by unbound kthreads or even by detached pinned user tasks. Make the fallback affinity setting aware of nohz_full so that such CPUs are avoided whenever possible. Suggested-by: Michal Hocko <mhocko@suse.com> Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
This commit is contained in:
parent
9b0379cd58
commit
8baab05e0d
@ -671,6 +671,7 @@ static inline bool supports_clearbhb(int scope)
|
||||
}
|
||||
|
||||
const struct cpumask *system_32bit_el0_cpumask(void);
|
||||
const struct cpumask *fallback_32bit_el0_cpumask(void);
|
||||
DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
|
||||
|
||||
static inline bool system_supports_32bit_el0(void)
|
||||
|
@ -283,6 +283,8 @@ task_cpu_possible_mask(struct task_struct *p)
|
||||
}
|
||||
#define task_cpu_possible_mask task_cpu_possible_mask
|
||||
|
||||
const struct cpumask *task_cpu_fallback_mask(struct task_struct *p);
|
||||
|
||||
void verify_cpu_asid_bits(void);
|
||||
void post_ttbr_update_workaround(void);
|
||||
|
||||
|
@ -1642,6 +1642,17 @@ const struct cpumask *system_32bit_el0_cpumask(void)
|
||||
return cpu_possible_mask;
|
||||
}
|
||||
|
||||
const struct cpumask *task_cpu_fallback_mask(struct task_struct *p)
|
||||
{
|
||||
if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
|
||||
return housekeeping_cpumask(HK_TYPE_TICK);
|
||||
|
||||
if (!is_compat_thread(task_thread_info(p)))
|
||||
return housekeeping_cpumask(HK_TYPE_TICK);
|
||||
|
||||
return system_32bit_el0_cpumask();
|
||||
}
|
||||
|
||||
static int __init parse_32bit_el0_param(char *str)
|
||||
{
|
||||
allow_mismatched_32bit_el0 = true;
|
||||
|
@ -24,6 +24,7 @@ static inline void leave_mm(void) { }
|
||||
#ifndef task_cpu_possible_mask
|
||||
# define task_cpu_possible_mask(p) cpu_possible_mask
|
||||
# define task_cpu_possible(cpu, p) true
|
||||
# define task_cpu_fallback_mask(p) housekeeping_cpumask(HK_TYPE_TICK)
|
||||
#else
|
||||
# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
|
||||
#endif
|
||||
|
@ -3534,7 +3534,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
|
||||
*
|
||||
* More yuck to audit.
|
||||
*/
|
||||
do_set_cpus_allowed(p, task_cpu_possible_mask(p));
|
||||
do_set_cpus_allowed(p, task_cpu_fallback_mask(p));
|
||||
state = fail;
|
||||
break;
|
||||
case fail:
|
||||
|
Loading…
Reference in New Issue
Block a user