Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
(synced 2025-01-15 11:57:46 +00:00)
Merge branch into tip/master: 'x86/mm'

# New commits in x86/mm:
    aa135d1d0902 ("x86/mm: Remove unnecessary include of <linux/extable.h>")
    dd4059634dab ("x86/mtrr: Rename mtrr_overwrite_state() to guest_force_mtrr_state()")
    9d93db0d1881 ("x86/mm/selftests: Fix typo in lam.c")
    6db2526c1d69 ("x86/mm/tlb: Only trim the mm_cpumask once a second")
    953753db887f ("x86/mm/tlb: Also remove local CPU from mm_cpumask if stale")
    2815a56e4b72 ("x86/mm/tlb: Add tracepoint for TLB flush IPI to stale CPU")
    209954cbc7d0 ("x86/mm/tlb: Update mm_cpumask lazily")

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 44383c12d5
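The core of this series is lazy mm_cpumask maintenance: a CPU is no longer
removed from a process's mm_cpumask at context-switch time; instead, stale
CPUs are cleared at TLB flush time, and the cpumask is trimmed at most once
per second per mm. The once-a-second gate is a plain jiffies deadline. Below
is a minimal standalone sketch of that pattern; the trim_due() helper is
hypothetical, and the real code is should_trim_cpumask() in the tlb.c hunk
further down:

#include <linux/compiler.h>
#include <linux/jiffies.h>

/*
 * Hypothetical helper mirroring should_trim_cpumask(): return true at
 * most once per second by re-arming a jiffies deadline. The diff below
 * stores this deadline in mm->context.next_trim_cpumask.
 */
static bool trim_due(unsigned long *next_trim)
{
        if (time_after(jiffies, READ_ONCE(*next_trim))) {
                WRITE_ONCE(*next_trim, jiffies + HZ);   /* re-arm one second out */
                return true;
        }
        return false;
}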
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -37,6 +37,8 @@ typedef struct {
 	 */
 	atomic64_t tlb_gen;
 
+	unsigned long next_trim_cpumask;
+
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	struct rw_semaphore ldt_usr_sem;
 	struct ldt_struct *ldt;
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -151,6 +151,7 @@ static inline int init_new_context(struct task_struct *tsk,
 
 	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
 	atomic64_set(&mm->context.tlb_gen, 0);
+	mm->context.next_trim_cpumask = jiffies + HZ;
 
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -222,6 +222,7 @@ struct flush_tlb_info {
 	unsigned int	initiating_cpu;
 	u8		stride_shift;
 	u8		freed_tables;
+	u8		trim_cpumask;
 };
 
 void flush_tlb_local(void);
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1854,11 +1854,18 @@ static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
 	return temp_state;
 }
 
+__ro_after_init struct mm_struct *poking_mm;
+__ro_after_init unsigned long poking_addr;
+
 static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
 {
 	lockdep_assert_irqs_disabled();
+
 	switch_mm_irqs_off(NULL, prev_state.mm, current);
 
+	/* Clear the cpumask, to indicate no TLB flushing is needed anywhere */
+	cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(poking_mm));
+
 	/*
 	 * Restore the breakpoints if they were disabled before the temporary mm
 	 * was loaded.
@@ -1867,9 +1874,6 @@ static inline void unuse_temporary_mm(temp_mm_state_t prev_state)
 		hw_breakpoint_restore();
 }
 
-__ro_after_init struct mm_struct *poking_mm;
-__ro_after_init unsigned long poking_addr;
-
 static void text_poke_memcpy(void *dst, const void *src, size_t len)
 {
 	memcpy(dst, src, len);
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -7,7 +7,6 @@
 #include <linux/sched.h>		/* test_thread_flag(), ...	*/
 #include <linux/sched/task_stack.h>	/* task_stack_*(), ...		*/
 #include <linux/kdebug.h>		/* oops_begin/end, ...		*/
-#include <linux/extable.h>		/* search_exception_tables	*/
 #include <linux/memblock.h>		/* max_low_pfn			*/
 #include <linux/kfence.h>		/* kfence_handle_page_fault	*/
 #include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -607,18 +607,15 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 		cond_mitigation(tsk);
 
 		/*
-		 * Stop remote flushes for the previous mm.
-		 * Skip kernel threads; we never send init_mm TLB flushing IPIs,
-		 * but the bitmap manipulation can cause cache line contention.
+		 * Leave this CPU in prev's mm_cpumask. Atomic writes to
+		 * mm_cpumask can be expensive under contention. The CPU
+		 * will be removed lazily at TLB flush time.
 		 */
-		if (prev != &init_mm) {
-			VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu,
-					mm_cpumask(prev)));
-			cpumask_clear_cpu(cpu, mm_cpumask(prev));
-		}
+		VM_WARN_ON_ONCE(prev != &init_mm && !cpumask_test_cpu(cpu,
+				mm_cpumask(prev)));
 
 		/* Start receiving IPIs and then read tlb_gen (and LAM below) */
-		if (next != &init_mm)
+		if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next)))
 			cpumask_set_cpu(cpu, mm_cpumask(next));
 		next_tlb_gen = atomic64_read(&next->context.tlb_gen);
 
@@ -760,10 +757,13 @@ static void flush_tlb_func(void *info)
 	if (!local) {
 		inc_irq_stat(irq_tlb_count);
 		count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	}
 
-		/* Can only happen on remote CPUs */
-		if (f->mm && f->mm != loaded_mm)
-			return;
+	/* The CPU was left in the mm_cpumask of the target mm. Clear it. */
+	if (f->mm && f->mm != loaded_mm) {
+		cpumask_clear_cpu(raw_smp_processor_id(), mm_cpumask(f->mm));
+		trace_tlb_flush(TLB_REMOTE_WRONG_CPU, 0);
+		return;
 	}
 
 	if (unlikely(loaded_mm == &init_mm))
@@ -893,9 +893,36 @@ done:
 			nr_invalidate);
 }
 
-static bool tlb_is_not_lazy(int cpu, void *data)
+static bool should_flush_tlb(int cpu, void *data)
 {
-	return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
+	struct flush_tlb_info *info = data;
+
+	/* Lazy TLB will get flushed at the next context switch. */
+	if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
+		return false;
+
+	/* No mm means kernel memory flush. */
+	if (!info->mm)
+		return true;
+
+	/* The target mm is loaded, and the CPU is not lazy. */
+	if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
+		return true;
+
+	/* In cpumask, but not the loaded mm? Periodically remove by flushing. */
+	if (info->trim_cpumask)
+		return true;
+
+	return false;
+}
+
+static bool should_trim_cpumask(struct mm_struct *mm)
+{
+	if (time_after(jiffies, READ_ONCE(mm->context.next_trim_cpumask))) {
+		WRITE_ONCE(mm->context.next_trim_cpumask, jiffies + HZ);
+		return true;
+	}
+	return false;
 }
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
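The predicate added above is consumed, in the next hunk, by
on_each_cpu_cond_mask(), which calls it once per candidate CPU and sends the
flush IPI only where it returns true. A minimal sketch of that calling
pattern, with a hypothetical per-CPU flag standing in for the real
cpu_tlbstate_shared.is_lazy state:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(bool, cpu_is_lazy);	/* assumed stand-in flag */

/* smp_cond_func_t callback: invoked once per CPU in the mask. */
static bool want_flush_ipi(int cpu, void *info)
{
        return !per_cpu(cpu_is_lazy, cpu);
}

static void flush_where_needed(smp_call_func_t func, void *info,
                               const struct cpumask *mask)
{
        /* Run func(info) only on the mask CPUs where want_flush_ipi() is true. */
        on_each_cpu_cond_mask(want_flush_ipi, func, info, true, mask);
}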
@@ -929,7 +956,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
 	if (info->freed_tables)
 		on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
 	else
-		on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
+		on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
 				(void *)info, 1, cpumask);
 }
 
@@ -980,6 +1007,7 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
 	info->freed_tables	= freed_tables;
 	info->new_tlb_gen	= new_tlb_gen;
 	info->initiating_cpu	= smp_processor_id();
+	info->trim_cpumask	= 0;
 
 	return info;
 }
@@ -1022,6 +1050,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	 * flush_tlb_func_local() directly in this case.
 	 */
 	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
+		info->trim_cpumask = should_trim_cpumask(mm);
 		flush_tlb_multi(mm_cpumask(mm), info);
 	} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
 		lockdep_assert_irqs_enabled();
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -1404,6 +1404,7 @@ enum tlb_flush_reason {
 	TLB_LOCAL_SHOOTDOWN,
 	TLB_LOCAL_MM_SHOOTDOWN,
 	TLB_REMOTE_SEND_IPI,
+	TLB_REMOTE_WRONG_CPU,
 	NR_TLB_FLUSH_REASONS,
 };
 
--- a/tools/testing/selftests/x86/lam.c
+++ b/tools/testing/selftests/x86/lam.c
@@ -237,7 +237,7 @@ static uint64_t set_metadata(uint64_t src, unsigned long lam)
  * both pointers should point to the same address.
  *
  * @return:
- * 0: value on the pointer with metadate and value on original are same
+ * 0: value on the pointer with metadata and value on original are same
  * 1: not same.
  */
 static int handle_lam_test(void *src, unsigned int lam)