sched/isolation: Introduce housekeeping flags
Before we implement isolcpus under housekeeping, we need the isolation features to be more fine-grained. For example some people want NOHZ_FULL without the full scheduler isolation, others want full scheduler isolation without NOHZ_FULL. So let's cut all these isolation features piecewise, at the risk of overcutting it right now. We can still merge some flags later if they always make sense together.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Wanpeng Li <kernellwp@gmail.com>
Link: http://lkml.kernel.org/r/1509072159-31808-9-git-send-email-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 5c4991e24c
commit de201559df
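
For illustration, a minimal caller-side sketch of the flag-based API introduced below. The function example_bind_housekeeping() is hypothetical; the housekeeping_*() calls and HK_FLAG_* values are the ones added by this patch:

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/isolation.h>

/* Hypothetical subsystem setup: avoid CPUs isolated from "misc"
 * housekeeping work without also requiring timer or scheduler
 * isolation. */
static void example_bind_housekeeping(struct task_struct *kthread)
{
	int cpu;

	/* Keep the kthread on CPUs that still do misc housekeeping. */
	housekeeping_affine(kthread, HK_FLAG_MISC);

	/* Pick any online CPU that still handles housekeeping timers. */
	cpu = housekeeping_any_cpu(HK_FLAG_TIMER);

	/* Does this CPU still take part in scheduler housekeeping
	 * (idle load balancing), as checked in nohz_balance_enter_idle()? */
	if (housekeeping_cpu(cpu, HK_FLAG_SCHED))
		pr_info("CPU %d still does idle load balancing\n", cpu);
}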
@@ -2270,8 +2270,8 @@ static int __init tile_net_init_module(void)
 		tile_net_dev_init(name, mac);
 
 	if (!network_cpus_init())
-		cpumask_and(&network_cpus_map, housekeeping_cpumask(),
-			    cpu_online_mask);
+		cpumask_and(&network_cpus_map,
+			    housekeeping_cpumask(HK_FLAG_MISC), cpu_online_mask);
 
 	return 0;
 }
@@ -5,35 +5,43 @@
 #include <linux/init.h>
 #include <linux/tick.h>
 
+enum hk_flags {
+	HK_FLAG_TIMER	= 1,
+	HK_FLAG_RCU	= (1 << 1),
+	HK_FLAG_MISC	= (1 << 2),
+	HK_FLAG_SCHED	= (1 << 3),
+};
+
 #ifdef CONFIG_CPU_ISOLATION
 DECLARE_STATIC_KEY_FALSE(housekeeping_overriden);
-extern int housekeeping_any_cpu(void);
-extern const struct cpumask *housekeeping_cpumask(void);
-extern void housekeeping_affine(struct task_struct *t);
-extern bool housekeeping_test_cpu(int cpu);
+extern int housekeeping_any_cpu(enum hk_flags flags);
+extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
+extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
+extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags);
 extern void __init housekeeping_init(void);
 
 #else
 
-static inline int housekeeping_any_cpu(void)
+static inline int housekeeping_any_cpu(enum hk_flags flags)
 {
 	return smp_processor_id();
 }
 
-static inline const struct cpumask *housekeeping_cpumask(void)
+static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
 {
 	return cpu_possible_mask;
 }
 
-static inline void housekeeping_affine(struct task_struct *t) { }
+static inline void housekeeping_affine(struct task_struct *t,
+				       enum hk_flags flags) { }
 static inline void housekeeping_init(void) { }
 #endif /* CONFIG_CPU_ISOLATION */
 
-static inline bool housekeeping_cpu(int cpu)
+static inline bool housekeeping_cpu(int cpu, enum hk_flags flags)
 {
 #ifdef CONFIG_CPU_ISOLATION
 	if (static_branch_unlikely(&housekeeping_overriden))
-		return housekeeping_test_cpu(cpu);
+		return housekeeping_test_cpu(cpu, flags);
 #endif
 	return true;
 }
@@ -2584,7 +2584,7 @@ static void rcu_bind_gp_kthread(void)
 
 	if (!tick_nohz_full_enabled())
 		return;
-	housekeeping_affine(current);
+	housekeeping_affine(current, HK_FLAG_RCU);
 }
 
 /* Record the current task on dyntick-idle entry. */
@@ -719,7 +719,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 	LIST_HEAD(rcu_tasks_holdouts);
 
 	/* Run on housekeeping CPUs by default. Sysadm can move if desired. */
-	housekeeping_affine(current);
+	housekeeping_affine(current, HK_FLAG_RCU);
 
 	/*
 	 * Each pass through the following loop makes one check for
@@ -527,7 +527,7 @@ int get_nohz_timer_target(void)
 	int i, cpu = smp_processor_id();
 	struct sched_domain *sd;
 
-	if (!idle_cpu(cpu) && housekeeping_cpu(cpu))
+	if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER))
 		return cpu;
 
 	rcu_read_lock();
@@ -536,15 +536,15 @@ int get_nohz_timer_target(void)
 			if (cpu == i)
 				continue;
 
-			if (!idle_cpu(i) && housekeeping_cpu(i)) {
+			if (!idle_cpu(i) && housekeeping_cpu(i, HK_FLAG_TIMER)) {
 				cpu = i;
 				goto unlock;
 			}
 		}
 	}
 
-	if (!housekeeping_cpu(cpu))
-		cpu = housekeeping_any_cpu();
+	if (!housekeeping_cpu(cpu, HK_FLAG_TIMER))
+		cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
 unlock:
 	rcu_read_unlock();
 	return cpu;
@@ -9027,7 +9027,7 @@ void nohz_balance_enter_idle(int cpu)
 		return;
 
 	/* Spare idle load balancing on CPUs that don't want to be disturbed: */
-	if (!housekeeping_cpu(cpu))
+	if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
 		return;
 
 	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
@@ -15,37 +15,39 @@
 DEFINE_STATIC_KEY_FALSE(housekeeping_overriden);
 EXPORT_SYMBOL_GPL(housekeeping_overriden);
 static cpumask_var_t housekeeping_mask;
+static unsigned int housekeeping_flags;
 
-int housekeeping_any_cpu(void)
+int housekeeping_any_cpu(enum hk_flags flags)
 {
 	if (static_branch_unlikely(&housekeeping_overriden))
-		return cpumask_any_and(housekeeping_mask, cpu_online_mask);
-
+		if (housekeeping_flags & flags)
+			return cpumask_any_and(housekeeping_mask, cpu_online_mask);
 	return smp_processor_id();
 }
 EXPORT_SYMBOL_GPL(housekeeping_any_cpu);
 
-const struct cpumask *housekeeping_cpumask(void)
+const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
 {
 	if (static_branch_unlikely(&housekeeping_overriden))
-		return housekeeping_mask;
-
+		if (housekeeping_flags & flags)
+			return housekeeping_mask;
 	return cpu_possible_mask;
 }
 EXPORT_SYMBOL_GPL(housekeeping_cpumask);
 
-void housekeeping_affine(struct task_struct *t)
+void housekeeping_affine(struct task_struct *t, enum hk_flags flags)
 {
 	if (static_branch_unlikely(&housekeeping_overriden))
-		set_cpus_allowed_ptr(t, housekeeping_mask);
+		if (housekeeping_flags & flags)
+			set_cpus_allowed_ptr(t, housekeeping_mask);
 }
 EXPORT_SYMBOL_GPL(housekeeping_affine);
 
-bool housekeeping_test_cpu(int cpu)
+bool housekeeping_test_cpu(int cpu, enum hk_flags flags)
 {
 	if (static_branch_unlikely(&housekeeping_overriden))
-		return cpumask_test_cpu(cpu, housekeeping_mask);
-
+		if (housekeeping_flags & flags)
+			return cpumask_test_cpu(cpu, housekeeping_mask);
 	return true;
 }
 EXPORT_SYMBOL_GPL(housekeeping_test_cpu);
@@ -65,6 +67,8 @@ void __init housekeeping_init(void)
 	cpumask_andnot(housekeeping_mask,
 		       cpu_possible_mask, tick_nohz_full_mask);
 
+	housekeeping_flags = HK_FLAG_TIMER | HK_FLAG_RCU | HK_FLAG_MISC;
+
 	static_branch_enable(&housekeeping_overriden);
 
 	/* We need at least one CPU to handle housekeeping work */
@@ -777,7 +777,8 @@ void __init lockup_detector_init(void)
 	if (tick_nohz_full_enabled())
 		pr_info("Disabling watchdog on nohz_full cores by default\n");
 
-	cpumask_copy(&watchdog_cpumask, housekeeping_cpumask());
+	cpumask_copy(&watchdog_cpumask,
+		     housekeeping_cpumask(HK_FLAG_TIMER));
 
 	if (!watchdog_nmi_probe())
 		nmi_watchdog_available = true;
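
The gating added to the housekeeping_*() implementations above reduces to one bitmask test: a query only honors the housekeeping cpumask when its feature flag was recorded in housekeeping_flags at boot (housekeeping_init() selects HK_FLAG_TIMER | HK_FLAG_RCU | HK_FLAG_MISC for nohz_full), and otherwise falls back to treating every CPU as a housekeeping CPU. A minimal user-space model of that check follows; plain C rather than kernel code, with names that only mirror the patch:

#include <stdio.h>

enum hk_flags {
	HK_FLAG_TIMER	= 1,
	HK_FLAG_RCU	= (1 << 1),
	HK_FLAG_MISC	= (1 << 2),
	HK_FLAG_SCHED	= (1 << 3),
};

/* Boot-time selection modeled on housekeeping_init(): nohz_full enables
 * timer, RCU and misc isolation, but not scheduler isolation. */
static unsigned int housekeeping_flags = HK_FLAG_TIMER | HK_FLAG_RCU | HK_FLAG_MISC;

/* Model of housekeeping_test_cpu(): honor the housekeeping set only when
 * the queried feature is actually isolated; otherwise every CPU counts
 * as a housekeeping CPU. hk_mask is a toy bitmap of housekeeping CPUs. */
static int is_housekeeping_cpu(int cpu, unsigned int flags, unsigned int hk_mask)
{
	if (housekeeping_flags & flags)
		return (hk_mask >> cpu) & 1;
	return 1;
}

int main(void)
{
	unsigned int hk_mask = 0x1;	/* pretend only CPU 0 does housekeeping */

	/* CPU 1 is not a timer-housekeeping CPU (HK_FLAG_TIMER is enabled)... */
	printf("timer on cpu1: %d\n", is_housekeeping_cpu(1, HK_FLAG_TIMER, hk_mask));

	/* ...but it still counts for scheduler housekeeping, because
	 * HK_FLAG_SCHED is not in housekeeping_flags in this model. */
	printf("sched on cpu1: %d\n", is_housekeeping_cpu(1, HK_FLAG_SCHED, hk_mask));
	return 0;
}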