rcu: Rename rcu_momentary_dyntick_idle() into rcu_momentary_eqs()
The context_tracking.state RCU_DYNTICKS subvariable has been renamed to RCU_WATCHING, so replace "dyntick_idle" with "eqs" in this function name to drop the dyntick reference.

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Neeraj Upadhyay <neeraj.upadhyay@kernel.org>
parent 3b18eb3f9f
commit 32a9f26e5e
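All of the call sites touched below share one pattern: per the tree.c doc comment in this diff, the caller must have disabled interrupts and must not be idle, so callers bracket the call with local_irq_save()/local_irq_restore(). A minimal sketch of that pattern (illustrative only, not itself a hunk of this commit):

	unsigned long flags;

	local_irq_save(flags);
	rcu_momentary_eqs();	/* report a momentary extended quiescent state (EQS) */
	local_irq_restore(flags);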
@@ -158,7 +158,7 @@ void rcu_scheduler_starting(void);
 static inline void rcu_end_inkernel_boot(void) { }
 static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
 static inline bool rcu_is_watching(void) { return true; }
-static inline void rcu_momentary_dyntick_idle(void) { }
+static inline void rcu_momentary_eqs(void) { }
 static inline void kfree_rcu_scheduler_running(void) { }
 static inline bool rcu_gp_might_be_stalled(void) { return false; }
 
@@ -37,7 +37,7 @@ void synchronize_rcu_expedited(void);
 void kvfree_call_rcu(struct rcu_head *head, void *ptr);
 
 void rcu_barrier(void);
-void rcu_momentary_dyntick_idle(void);
+void rcu_momentary_eqs(void);
 void kfree_rcu_scheduler_running(void);
 bool rcu_gp_might_be_stalled(void);
 
@@ -2680,7 +2680,7 @@ static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
 		rcu_torture_fwd_prog_cond_resched(freed);
 		if (tick_nohz_full_enabled()) {
 			local_irq_save(flags);
-			rcu_momentary_dyntick_idle();
+			rcu_momentary_eqs();
 			local_irq_restore(flags);
 		}
 	}
@@ -2830,7 +2830,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
 		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
 		if (tick_nohz_full_enabled()) {
 			local_irq_save(flags);
-			rcu_momentary_dyntick_idle();
+			rcu_momentary_eqs();
 			local_irq_restore(flags);
 		}
 	}
@@ -367,7 +367,7 @@ bool rcu_watching_zero_in_eqs(int cpu, int *vp)
  *
  * The caller must have disabled interrupts and must not be idle.
  */
-notrace void rcu_momentary_dyntick_idle(void)
+notrace void rcu_momentary_eqs(void)
 {
 	int seq;
 
@@ -377,7 +377,7 @@ notrace void rcu_momentary_dyntick_idle(void)
 	WARN_ON_ONCE(!(seq & CT_RCU_WATCHING));
 	rcu_preempt_deferred_qs(current);
 }
-EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
+EXPORT_SYMBOL_GPL(rcu_momentary_eqs);
 
 /**
  * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
@@ -917,7 +917,7 @@ static void nocb_cb_wait(struct rcu_data *rdp)
 	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
 
 	local_irq_save(flags);
-	rcu_momentary_dyntick_idle();
+	rcu_momentary_eqs();
 	local_irq_restore(flags);
 	/*
 	 * Disable BH to provide the expected environment. Also, when
@@ -869,7 +869,7 @@ static void rcu_qs(void)
 
 /*
  * Register an urgently needed quiescent state. If there is an
- * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
+ * emergency, invoke rcu_momentary_eqs() to do a heavy-weight
  * dyntick-idle quiescent state visible to other CPUs, which will in
  * some cases serve for expedited as well as normal grace periods.
  * Either way, register a lightweight quiescent state.
@@ -889,7 +889,7 @@ void rcu_all_qs(void)
 	this_cpu_write(rcu_data.rcu_urgent_qs, false);
 	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
 		local_irq_save(flags);
-		rcu_momentary_dyntick_idle();
+		rcu_momentary_eqs();
 		local_irq_restore(flags);
 	}
 	rcu_qs();
@@ -909,7 +909,7 @@ void rcu_note_context_switch(bool preempt)
 		goto out;
 	this_cpu_write(rcu_data.rcu_urgent_qs, false);
 	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
-		rcu_momentary_dyntick_idle();
+		rcu_momentary_eqs();
 out:
 	rcu_tasks_qs(current, preempt);
 	trace_rcu_utilization(TPS("End context switch"));
@@ -251,7 +251,7 @@ static int multi_cpu_stop(void *data)
 			 */
 			touch_nmi_watchdog();
 		}
-		rcu_momentary_dyntick_idle();
+		rcu_momentary_eqs();
 	} while (curstate != MULTI_STOP_EXIT);
 
 	local_irq_restore(flags);
@@ -1535,7 +1535,7 @@ static int run_osnoise(void)
 		 * This will eventually cause unwarranted noise as PREEMPT_RCU
 		 * will force preemption as the means of ending the current
 		 * grace period. We avoid this problem by calling
-		 * rcu_momentary_dyntick_idle(), which performs a zero duration
+		 * rcu_momentary_eqs(), which performs a zero duration
 		 * EQS allowing PREEMPT_RCU to end the current grace period.
 		 * This call shouldn't be wrapped inside an RCU critical
 		 * section.
@@ -1547,7 +1547,7 @@ static int run_osnoise(void)
 		if (!disable_irq)
 			local_irq_disable();
 
-		rcu_momentary_dyntick_idle();
+		rcu_momentary_eqs();
 
 		if (!disable_irq)
 			local_irq_enable();
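Pieced together, the two @@ -367 and @@ -377 hunks above give the renamed function roughly the following shape. This is a sketch, not a hunk of this commit: the two lines marked "assumed" are reconstructed from upstream context and do not appear in this diff.

	notrace void rcu_momentary_eqs(void)
	{
		int seq;

		raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);	/* assumed, not in this diff */
		seq = ct_state_inc(2 * CT_RCU_WATCHING);		/* assumed, not in this diff */
		/* The caller must not be idle (see the doc comment above). */
		WARN_ON_ONCE(!(seq & CT_RCU_WATCHING));
		rcu_preempt_deferred_qs(current);
	}
	EXPORT_SYMBOL_GPL(rcu_momentary_eqs);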