Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2024-12-28 16:52:18 +00:00)
Commit 8baab05e0d
When a kthread or any other task has an affinity mask that is fully
offline or unallowed, the scheduler reaffines the task to all possible
CPUs as a last resort. This default decision doesn't mix well with
nohz_full CPUs, which are part of the possible cpumask but don't want to
be disturbed by unbound kthreads or even detached pinned user tasks.

Make the fallback affinity setting aware of nohz_full.

Suggested-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
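To illustrate the effect, here is a hedged sketch of the scheduler's last-resort path. select_fallback_rq(), do_set_cpus_allowed() and the cpumask helpers are real kernel functions, but this condensed flow is an illustration of the idea, not the actual patch:

/*
 * Illustrative sketch only: a heavily condensed view of the scheduler's
 * last-resort CPU selection. The real select_fallback_rq() also handles
 * cpuset fallback and several intermediate states omitted here.
 */
static int fallback_rq_sketch(struct task_struct *p)
{
	int dest_cpu;

	/* Prefer any active CPU the task is still allowed to run on. */
	for_each_cpu(dest_cpu, p->cpus_ptr) {
		if (cpu_active(dest_cpu))
			return dest_cpu;
	}

	/*
	 * No allowed CPU is usable: widen the affinity. With this change,
	 * the task falls back to the housekeeping CPUs rather than to
	 * cpu_possible_mask, leaving nohz_full CPUs undisturbed.
	 */
	do_set_cpus_allowed(p, task_cpu_fallback_mask(p));
	return cpumask_any(p->cpus_ptr);
}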
47 lines
1.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_CONTEXT_H
#define _LINUX_MMU_CONTEXT_H

#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* Architectures that care about IRQ state in switch_mm can override this. */
#ifndef switch_mm_irqs_off
# define switch_mm_irqs_off switch_mm
#endif

#ifndef leave_mm
static inline void leave_mm(void) { }
#endif

/*
 * CPUs that are capable of running user task @p. Must contain at least one
 * active CPU. It is assumed that the kernel can run on all CPUs, so calling
 * this for a kernel thread is pointless.
 *
 * By default, we assume a sane, homogeneous system.
 */
#ifndef task_cpu_possible_mask
# define task_cpu_possible_mask(p)	cpu_possible_mask
# define task_cpu_possible(cpu, p)	true
# define task_cpu_fallback_mask(p)	housekeeping_cpumask(HK_TYPE_TICK)
#else
# define task_cpu_possible(cpu, p)	cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
#endif

#ifndef mm_untag_mask
/* Default: no pointer tagging, so untagging a user address is a no-op. */
static inline unsigned long mm_untag_mask(struct mm_struct *mm)
{
	return -1UL;
}
#endif

#ifndef arch_pgtable_dma_compat
/* Default: assume the mm's page tables are compatible with DMA. */
static inline bool arch_pgtable_dma_compat(struct mm_struct *mm)
{
	return true;
}
#endif

#endif /* _LINUX_MMU_CONTEXT_H */
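As a usage sketch of the hooks above (a hypothetical helper, not part of the kernel tree): generic code is expected to filter candidate CPUs through task_cpu_possible() rather than assume every active CPU can run every user task. Only task_cpu_possible() and the standard cpumask helpers below are real; the helper name is an assumption.

/*
 * Hypothetical helper, for illustration only: find an active CPU that
 * user task @p is architecturally able to run on. Per the comment in the
 * header, task_cpu_possible_mask(p) must contain at least one active CPU.
 */
static inline int any_possible_active_cpu(struct task_struct *p)
{
	int cpu;

	for_each_cpu(cpu, cpu_active_mask) {
		if (task_cpu_possible(cpu, p))
			return cpu;
	}
	return -EINVAL;	/* should be unreachable */
}

On a homogeneous system this degenerates to picking any active CPU, since the default task_cpu_possible() is the constant true; only architectures that override task_cpu_possible_mask() (e.g. for tasks restricted to a subset of CPUs) make the filter meaningful.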