mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
tick/nohz: Only check for RCU deferred wakeup on user/guest entry when needed

Checking for and processing RCU-nocb deferred wakeup upon user/guest
entry is only relevant when nohz_full runs on the local CPU, otherwise
the periodic tick should take care of it.

Make sure we don't needlessly pollute these fast-paths as a -3%
performance regression on a will-it-scale.per_process_ops has been
reported so far.

Fixes: 47b8ff194c ("entry: Explicitly flush pending rcuog wakeup before last rescheduling point")
Fixes: 4ae7dc97f7 ("entry/kvm: Explicitly flush pending rcuog wakeup before last rescheduling point")
Reported-by: kernel test robot <oliver.sang@intel.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20210527113441.465489-1-frederic@kernel.org
commit f268c3737e
parent 02da26ad5e
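Not part of the commit itself, but as a reading aid before the diff below: a minimal userspace sketch of the pattern the patch introduces, where the deferred rcuog wakeup flush runs only when the local CPU is nohz_full, so CPUs that keep the periodic tick skip the work entirely. The stubbed predicates, the fake CPU mask, and the explicit cpu parameter (the real kernel helpers use smp_processor_id() internally) are assumptions for illustration, not kernel code.

/*
 * Userspace model of the gated flush: the expensive deferred-wakeup
 * flush on the user/guest entry path is only performed on CPUs that
 * actually run in nohz_full mode.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for tick_nohz_full_mask: pretend only CPU 3 is nohz_full. */
static const bool fake_nohz_full_mask[NR_CPUS] = { false, false, false, true };

/* Models the kernel's tick_nohz_full_cpu() predicate. */
static bool tick_nohz_full_cpu(int cpu)
{
        return fake_nohz_full_mask[cpu];
}

/* Models the expensive rcu_nocb_flush_deferred_wakeup() call. */
static void rcu_nocb_flush_deferred_wakeup(int cpu)
{
        printf("CPU%d: flushing deferred rcuog wakeup\n", cpu);
}

/* Mirrors the shape of the new tick_nohz_user_enter_prepare() helper. */
static void tick_nohz_user_enter_prepare(int cpu)
{
        if (tick_nohz_full_cpu(cpu))
                rcu_nocb_flush_deferred_wakeup(cpu);
        else
                printf("CPU%d: periodic tick present, skipping flush\n", cpu);
}

int main(void)
{
        /* Simulate a return-to-user on every CPU. */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                tick_nohz_user_enter_prepare(cpu);
        return 0;
}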
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-kvm.h
@@ -3,6 +3,7 @@
 #define __LINUX_ENTRYKVM_H
 
 #include <linux/entry-common.h>
+#include <linux/tick.h>
 
 /* Transfer to guest mode work */
 #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
@@ -57,7 +58,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
 static inline void xfer_to_guest_mode_prepare(void)
 {
         lockdep_assert_irqs_disabled();
-        rcu_nocb_flush_deferred_wakeup();
+        tick_nohz_user_enter_prepare();
 }
 
 /**
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -11,6 +11,7 @@
 #include <linux/context_tracking_state.h>
 #include <linux/cpumask.h>
 #include <linux/sched.h>
+#include <linux/rcupdate.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void __init tick_init(void);
@@ -300,4 +301,10 @@ static inline void tick_nohz_task_switch(void)
                 __tick_nohz_task_switch();
 }
 
+static inline void tick_nohz_user_enter_prepare(void)
+{
+        if (tick_nohz_full_cpu(smp_processor_id()))
+                rcu_nocb_flush_deferred_wakeup();
+}
+
 #endif
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -5,6 +5,7 @@
 #include <linux/highmem.h>
 #include <linux/livepatch.h>
 #include <linux/audit.h>
+#include <linux/tick.h>
 
 #include "common.h"
 
@@ -186,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                 local_irq_disable_exit_to_user();
 
                 /* Check if any of the above work has queued a deferred wakeup */
-                rcu_nocb_flush_deferred_wakeup();
+                tick_nohz_user_enter_prepare();
 
                 ti_work = READ_ONCE(current_thread_info()->flags);
         }
@@ -202,7 +203,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
         lockdep_assert_irqs_disabled();
 
         /* Flush pending rcuog wakeup before the last need_resched() check */
-        rcu_nocb_flush_deferred_wakeup();
+        tick_nohz_user_enter_prepare();
 
         if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
                 ti_work = exit_to_user_mode_loop(regs, ti_work);
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -230,6 +230,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
+EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
 bool tick_nohz_full_running;
 EXPORT_SYMBOL_GPL(tick_nohz_full_running);
 static atomic_t tick_dep_mask;