/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 * Copyright SUSE, 2021
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 *	   Frederic Weisbecker <frederic@kernel.org>
 */

#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll; /* Offload kthreads are to poll. */

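/*
 * Is the current task one of this rcu_data structure's no-CBs kthreads?
 * Conservatively answer "yes" during early boot, before the kthreads
 * have been created and assigned.
 */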
static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
	/* Race on early boot between thread creation and assignment */
	if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
		return true;

	if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
		if (in_task())
			return true;
	return false;
}

/*
 * Offload callback processing from the boot-time-specified set of CPUs
 * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
 * created that pull the callbacks from the corresponding CPU, wait for
 * a grace period to elapse, and invoke the callbacks.  These kthreads
 * are organized into GP kthreads, which manage incoming callbacks, wait for
 * grace periods, and awaken CB kthreads, and the CB kthreads, which only
 * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
 * do a wake_up() on their GP kthread when they insert a callback into any
 * empty list, unless the rcu_nocb_poll boot parameter has been specified,
 * in which case each kthread actively polls its CPU.  (Which isn't so great
 * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
 *
 * This is intended to be used in conjunction with Frederic Weisbecker's
 * adaptive-idle work, which would seriously reduce OS jitter on CPUs
 * running CPU-bound user-mode computations.
 *
 * Offloading of callbacks can also be used as an energy-efficiency
 * measure because CPUs with no RCU callbacks queued are more aggressive
 * about entering dyntick-idle mode.
 */

/*
 * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
 * If the list is invalid, a warning is emitted and all CPUs are offloaded.
 */
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	if (*str == '=') {
		if (cpulist_parse(++str, rcu_nocb_mask)) {
			pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
			cpumask_setall(rcu_nocb_mask);
		}
	}
	rcu_state.nocb_is_setup = true;
	return 1;
}
__setup("rcu_nocbs", rcu_nocb_setup);

static int __init parse_rcu_nocb_poll(char *arg)
{
	rcu_nocb_poll = true;
	return 1;
}
__setup("rcu_nocb_poll", parse_rcu_nocb_poll);

/*
 * Don't bother bypassing ->cblist if the call_rcu() rate is low.
 * After all, the main point of bypassing is to avoid lock contention
 * on ->nocb_lock, which only can happen at high call_rcu() rates.
 */
static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
module_param(nocb_nobypass_lim_per_jiffy, int, 0);

/*
 * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
 * lock isn't immediately available, perform a minimal sanity check.
 */
static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
	__acquires(&rdp->nocb_bypass_lock)
{
	lockdep_assert_irqs_disabled();
	if (raw_spin_trylock(&rdp->nocb_bypass_lock))
		return;
	/*
	 * Contention is expected only when a local enqueue collides
	 * with a remote flush from the kthreads.
	 */
	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
	raw_spin_lock(&rdp->nocb_bypass_lock);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Conditionally acquire the specified rcu_data structure's
|
|
|
|
* ->nocb_bypass_lock.
|
|
|
|
*/
|
|
|
|
static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
lockdep_assert_irqs_disabled();
|
|
|
|
return raw_spin_trylock(&rdp->nocb_bypass_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release the specified rcu_data structure's ->nocb_bypass_lock.
|
|
|
|
*/
|
|
|
|
static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
|
|
|
|
__releases(&rdp->nocb_bypass_lock)
|
|
|
|
{
|
|
|
|
lockdep_assert_irqs_disabled();
|
|
|
|
raw_spin_unlock(&rdp->nocb_bypass_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Acquire the specified rcu_data structure's ->nocb_lock, but only
|
|
|
|
* if it corresponds to a no-CBs CPU.
|
|
|
|
*/
|
|
|
|
static void rcu_nocb_lock(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
lockdep_assert_irqs_disabled();
|
|
|
|
if (!rcu_rdp_is_offloaded(rdp))
|
|
|
|
return;
|
|
|
|
raw_spin_lock(&rdp->nocb_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release the specified rcu_data structure's ->nocb_lock, but only
|
|
|
|
* if it corresponds to a no-CBs CPU.
|
|
|
|
*/
|
|
|
|
static void rcu_nocb_unlock(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
if (rcu_rdp_is_offloaded(rdp)) {
|
|
|
|
lockdep_assert_irqs_disabled();
|
|
|
|
raw_spin_unlock(&rdp->nocb_lock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release the specified rcu_data structure's ->nocb_lock and restore
|
|
|
|
* interrupts, but only if it corresponds to a no-CBs CPU.
|
|
|
|
*/
|
|
|
|
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
|
|
|
|
unsigned long flags)
|
|
|
|
{
|
|
|
|
if (rcu_rdp_is_offloaded(rdp)) {
|
|
|
|
lockdep_assert_irqs_disabled();
|
|
|
|
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
|
|
|
|
} else {
|
|
|
|
local_irq_restore(flags);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Lockdep check that ->cblist may be safely accessed. */
|
|
|
|
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
lockdep_assert_irqs_disabled();
|
|
|
|
if (rcu_rdp_is_offloaded(rdp))
|
|
|
|
lockdep_assert_held(&rdp->nocb_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
|
|
|
|
* grace period.
|
|
|
|
*/
|
|
|
|
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
|
|
|
|
{
|
|
|
|
swake_up_all(sq);
|
|
|
|
}
|
|
|
|
|
|
|
|
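/*
 * Return the no-CBs grace-period wait queue on the specified rcu_node
 * structure that corresponds to that node's current ->gp_seq value.
 */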
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
|
|
|
|
{
|
|
|
|
return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
|
|
|
|
}
|
|
|
|
|
|
|
|
static void rcu_init_one_nocb(struct rcu_node *rnp)
|
|
|
|
{
|
|
|
|
init_swait_queue_head(&rnp->nocb_gp_wq[0]);
|
|
|
|
init_swait_queue_head(&rnp->nocb_gp_wq[1]);
|
|
|
|
}
|
|
|
|
|
|
|
|
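/*
 * Wake up the no-CBs grace-period kthread of the group led by rdp_gp,
 * cancelling any pending deferred wakeup.  Returns true if an actual
 * wakeup was performed.  Caller must hold rdp_gp->nocb_gp_lock, which
 * is released before return.
 */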
static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
|
|
|
|
struct rcu_data *rdp,
|
|
|
|
bool force, unsigned long flags)
|
|
|
|
__releases(rdp_gp->nocb_gp_lock)
|
|
|
|
{
|
|
|
|
bool needwake = false;
|
|
|
|
|
|
|
|
if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
|
|
|
|
raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
|
|
|
|
TPS("AlreadyAwake"));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
|
|
|
|
WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
|
|
|
|
del_timer(&rdp_gp->nocb_timer);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
|
|
|
|
WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
|
|
|
|
needwake = true;
|
|
|
|
}
|
|
|
|
raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
|
|
|
|
if (needwake) {
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
|
2024-08-13 22:56:41 +00:00
|
|
|
swake_up_one_online(&rdp_gp->nocb_gp_wq);
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return needwake;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Kick the GP kthread for this NOCB group.
|
|
|
|
*/
|
|
|
|
static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
|
|
|
|
|
|
|
|
raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
|
|
|
|
return __wake_nocb_gp(rdp_gp, rdp, force, flags);
|
|
|
|
}
|
|
|
|
|
2023-11-15 19:11:26 +00:00
|
|
|
#ifdef CONFIG_RCU_LAZY
|
2022-10-16 16:22:54 +00:00
|
|
|
/*
|
|
|
|
* LAZY_FLUSH_JIFFIES decides the maximum amount of time that
* can elapse before lazy callbacks are flushed.  Lazy callbacks
* could be flushed much earlier for a number of other reasons;
* however, LAZY_FLUSH_JIFFIES ensures that no lazy callbacks are
* left unsubmitted to RCU after that many jiffies.
*/
|
|
|
|
#define LAZY_FLUSH_JIFFIES (10 * HZ)
|
2023-11-15 19:11:26 +00:00
|
|
|
static unsigned long jiffies_lazy_flush = LAZY_FLUSH_JIFFIES;
|
2022-10-16 16:22:54 +00:00
|
|
|
|
|
|
|
// To be called only from test code.
|
2023-11-15 19:11:26 +00:00
|
|
|
void rcu_set_jiffies_lazy_flush(unsigned long jif)
|
2022-10-16 16:22:54 +00:00
|
|
|
{
|
2023-11-15 19:11:26 +00:00
|
|
|
jiffies_lazy_flush = jif;
|
2022-10-16 16:22:54 +00:00
|
|
|
}
|
2023-11-15 19:11:26 +00:00
|
|
|
EXPORT_SYMBOL(rcu_set_jiffies_lazy_flush);
|
2022-10-16 16:22:54 +00:00
|
|
|
|
2023-11-15 19:11:26 +00:00
|
|
|
unsigned long rcu_get_jiffies_lazy_flush(void)
|
2022-10-16 16:22:54 +00:00
|
|
|
{
|
2023-11-15 19:11:26 +00:00
|
|
|
return jiffies_lazy_flush;
|
2022-10-16 16:22:54 +00:00
|
|
|
}
|
2023-11-15 19:11:26 +00:00
|
|
|
EXPORT_SYMBOL(rcu_get_jiffies_lazy_flush);
|
2022-10-16 16:22:54 +00:00
|
|
|
#endif
|
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
/*
|
|
|
|
* Arrange to wake the GP kthread for this NOCB group at some future
|
|
|
|
* time when it is safe to do so.
|
|
|
|
*/
|
|
|
|
static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
|
|
|
|
const char *reason)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
|
|
|
|
|
|
|
|
raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
|
|
|
|
|
|
|
|
/*
|
2022-10-16 16:22:54 +00:00
|
|
|
* Bypass wakeup overrides previous deferments. In case of
|
|
|
|
* callback storms, no need to wake up too early.
|
2021-05-19 00:09:28 +00:00
|
|
|
*/
|
2022-10-16 16:22:54 +00:00
|
|
|
if (waketype == RCU_NOCB_WAKE_LAZY &&
|
|
|
|
rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT) {
|
2023-11-15 19:11:26 +00:00
|
|
|
mod_timer(&rdp_gp->nocb_timer, jiffies + rcu_get_jiffies_lazy_flush());
|
2022-10-16 16:22:54 +00:00
|
|
|
WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
|
|
|
|
} else if (waketype == RCU_NOCB_WAKE_BYPASS) {
|
2021-05-19 00:09:28 +00:00
|
|
|
mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
|
|
|
|
WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
|
|
|
|
} else {
|
|
|
|
if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
|
|
|
|
mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
|
|
|
|
if (rdp_gp->nocb_defer_wakeup < waketype)
|
|
|
|
WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
|
|
|
|
}
|
|
|
|
|
|
|
|
raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
|
|
|
|
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
|
|
|
|
* However, if there is a callback to be enqueued and if ->nocb_bypass
|
|
|
|
* proves to be initially empty, just return false because the no-CB GP
|
|
|
|
* kthread may need to be awakened in this case.
|
|
|
|
*
|
2022-10-16 16:22:54 +00:00
|
|
|
* Return true if there was something to be flushed and it succeeded, otherwise
|
|
|
|
* false.
|
|
|
|
*
|
2021-05-19 00:09:28 +00:00
|
|
|
* Note that this function always returns true if rhp is NULL.
|
|
|
|
*/
|
2022-10-16 16:22:55 +00:00
|
|
|
static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp_in,
|
2022-10-16 16:22:54 +00:00
|
|
|
unsigned long j, bool lazy)
|
2021-05-19 00:09:28 +00:00
|
|
|
{
|
|
|
|
struct rcu_cblist rcl;
|
2022-10-16 16:22:55 +00:00
|
|
|
struct rcu_head *rhp = rhp_in;
|
2021-05-19 00:09:28 +00:00
|
|
|
|
|
|
|
WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
|
|
|
|
rcu_lockdep_assert_cblist_protected(rdp);
|
|
|
|
lockdep_assert_held(&rdp->nocb_bypass_lock);
|
|
|
|
if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
|
|
|
|
raw_spin_unlock(&rdp->nocb_bypass_lock);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
|
|
|
|
if (rhp)
|
|
|
|
rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
|
2022-10-16 16:22:54 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If the new CB requested was a lazy one, queue it onto the main
|
2022-10-16 16:22:55 +00:00
|
|
|
* ->cblist so that we can take advantage of the grace-period that will
|
|
|
|
* happen regardless. But queue it onto the bypass list first so that
|
|
|
|
* the lazy CB is ordered with the existing CBs in the bypass list.
|
2022-10-16 16:22:54 +00:00
|
|
|
*/
|
|
|
|
if (lazy && rhp) {
|
2022-10-16 16:22:55 +00:00
|
|
|
rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
|
|
|
|
rhp = NULL;
|
2022-10-16 16:22:54 +00:00
|
|
|
}
|
2022-10-16 16:22:55 +00:00
|
|
|
rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
|
|
|
|
WRITE_ONCE(rdp->lazy_len, 0);
|
2022-10-16 16:22:54 +00:00
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
|
|
|
|
WRITE_ONCE(rdp->nocb_bypass_first, j);
|
|
|
|
rcu_nocb_bypass_unlock(rdp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
|
|
|
|
* However, if there is a callback to be enqueued and if ->nocb_bypass
|
|
|
|
* proves to be initially empty, just return false because the no-CB GP
|
|
|
|
* kthread may need to be awakened in this case.
|
|
|
|
*
|
|
|
|
* Note that this function always returns true if rhp is NULL.
|
|
|
|
*/
|
|
|
|
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
|
2022-10-16 16:22:54 +00:00
|
|
|
unsigned long j, bool lazy)
|
2021-05-19 00:09:28 +00:00
|
|
|
{
|
|
|
|
if (!rcu_rdp_is_offloaded(rdp))
|
|
|
|
return true;
|
|
|
|
rcu_lockdep_assert_cblist_protected(rdp);
|
|
|
|
rcu_nocb_bypass_lock(rdp);
|
2022-10-16 16:22:54 +00:00
|
|
|
return rcu_nocb_do_flush_bypass(rdp, rhp, j, lazy);
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the ->nocb_bypass_lock is immediately available, flush the
|
|
|
|
* ->nocb_bypass queue into ->cblist.
|
|
|
|
*/
|
|
|
|
static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
|
|
|
|
{
|
|
|
|
rcu_lockdep_assert_cblist_protected(rdp);
|
|
|
|
if (!rcu_rdp_is_offloaded(rdp) ||
|
|
|
|
!rcu_nocb_bypass_trylock(rdp))
|
|
|
|
return;
|
2022-10-16 16:22:54 +00:00
|
|
|
WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j, false));
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* See whether it is appropriate to use the ->nocb_bypass list in order
|
|
|
|
* to control contention on ->nocb_lock. A limited number of direct
|
|
|
|
* enqueues are permitted into ->cblist per jiffy. If ->nocb_bypass
|
|
|
|
* is non-empty, further callbacks must be placed into ->nocb_bypass,
|
|
|
|
* otherwise rcu_barrier() breaks. Use rcu_nocb_flush_bypass() to switch
|
|
|
|
* back to direct use of ->cblist. However, ->nocb_bypass should not be
|
|
|
|
* used if ->cblist is empty, because otherwise callbacks can be stranded
|
|
|
|
* on ->nocb_bypass because we cannot count on the current CPU ever again
|
|
|
|
* invoking call_rcu(). The general rule is that if ->nocb_bypass is
|
|
|
|
* non-empty, the corresponding no-CBs grace-period kthread must not be
|
|
|
|
* in an indefinite sleep state.
|
|
|
|
*
|
|
|
|
* Finally, it is not permitted to use the bypass during early boot,
|
|
|
|
* as doing so would confuse the auto-initialization code. Besides
|
|
|
|
* which, there is no point in worrying about lock contention while
|
|
|
|
* there is only one CPU in operation.
|
|
|
|
*/
|
|
|
|
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
|
2022-10-16 16:22:54 +00:00
|
|
|
bool *was_alldone, unsigned long flags,
|
|
|
|
bool lazy)
|
2021-05-19 00:09:28 +00:00
|
|
|
{
|
|
|
|
unsigned long c;
|
|
|
|
unsigned long cur_gp_seq;
|
|
|
|
unsigned long j = jiffies;
|
|
|
|
long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
|
2022-10-16 16:22:54 +00:00
|
|
|
bool bypass_is_lazy = (ncbs == READ_ONCE(rdp->lazy_len));
|
2021-05-19 00:09:28 +00:00
|
|
|
|
|
|
|
lockdep_assert_irqs_disabled();
|
|
|
|
|
|
|
|
// Pure softirq/rcuc based processing: no bypassing, no
|
|
|
|
// locking.
|
|
|
|
if (!rcu_rdp_is_offloaded(rdp)) {
|
|
|
|
*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Don't use ->nocb_bypass during early boot.
|
|
|
|
if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
|
|
|
|
rcu_nocb_lock(rdp);
|
|
|
|
WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
|
|
|
|
*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we have advanced to a new jiffy, reset counts to allow
|
|
|
|
// moving back from ->nocb_bypass to ->cblist.
|
|
|
|
if (j == rdp->nocb_nobypass_last) {
|
|
|
|
c = rdp->nocb_nobypass_count + 1;
|
|
|
|
} else {
|
|
|
|
WRITE_ONCE(rdp->nocb_nobypass_last, j);
|
|
|
|
c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
|
|
|
|
if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
|
|
|
|
nocb_nobypass_lim_per_jiffy))
|
|
|
|
c = 0;
|
|
|
|
else if (c > nocb_nobypass_lim_per_jiffy)
|
|
|
|
c = nocb_nobypass_lim_per_jiffy;
|
|
|
|
}
|
|
|
|
WRITE_ONCE(rdp->nocb_nobypass_count, c);
|
|
|
|
|
|
|
|
// If there hasn't yet been all that many ->cblist enqueues
|
|
|
|
// this jiffy, tell the caller to enqueue onto ->cblist. But flush
|
|
|
|
// ->nocb_bypass first.
|
2022-10-16 16:22:54 +00:00
|
|
|
// Lazy CBs throttle this back and do immediate bypass queuing.
|
|
|
|
if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy && !lazy) {
|
2021-05-19 00:09:28 +00:00
|
|
|
rcu_nocb_lock(rdp);
|
|
|
|
*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
|
|
|
|
if (*was_alldone)
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
|
|
|
|
TPS("FirstQ"));
|
2022-10-16 16:22:54 +00:00
|
|
|
|
|
|
|
WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j, false));
|
2021-05-19 00:09:28 +00:00
|
|
|
WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
|
|
|
|
return false; // Caller must enqueue the callback.
|
|
|
|
}
|
|
|
|
|
|
|
|
// If ->nocb_bypass has been used too long or is too full,
|
|
|
|
// flush ->nocb_bypass to ->cblist.
|
2022-10-16 16:22:54 +00:00
|
|
|
if ((ncbs && !bypass_is_lazy && j != READ_ONCE(rdp->nocb_bypass_first)) ||
|
|
|
|
(ncbs && bypass_is_lazy &&
|
2023-11-15 19:11:26 +00:00
|
|
|
(time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()))) ||
|
2021-05-19 00:09:28 +00:00
|
|
|
ncbs >= qhimark) {
|
|
|
|
rcu_nocb_lock(rdp);
|
2022-09-17 16:41:59 +00:00
|
|
|
*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
|
|
|
|
|
2022-10-16 16:22:54 +00:00
|
|
|
if (!rcu_nocb_flush_bypass(rdp, rhp, j, lazy)) {
|
2021-05-19 00:09:28 +00:00
|
|
|
if (*was_alldone)
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
|
|
|
|
TPS("FirstQ"));
|
|
|
|
WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
|
|
|
|
return false; // Caller must enqueue the callback.
|
|
|
|
}
|
|
|
|
if (j != rdp->nocb_gp_adv_time &&
|
|
|
|
rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
|
|
|
|
rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
|
|
|
|
rcu_advance_cbs_nowake(rdp->mynode, rdp);
|
|
|
|
rdp->nocb_gp_adv_time = j;
|
|
|
|
}
|
2022-09-17 16:41:59 +00:00
|
|
|
|
|
|
|
// The flush succeeded and we moved CBs into the regular list.
|
|
|
|
// Don't wait for the wake up timer as it may be too far ahead.
|
|
|
|
// Wake up the GP thread now instead, if the cblist was empty.
|
|
|
|
__call_rcu_nocb_wake(rdp, *was_alldone, flags);
|
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
return true; // Callback already enqueued.
|
|
|
|
}
|
|
|
|
|
|
|
|
// We need to use the bypass.
|
|
|
|
rcu_nocb_bypass_lock(rdp);
|
|
|
|
ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
|
|
|
|
rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
|
|
|
|
rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
|
2022-10-16 16:22:54 +00:00
|
|
|
|
|
|
|
if (lazy)
|
|
|
|
WRITE_ONCE(rdp->lazy_len, rdp->lazy_len + 1);
|
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
if (!ncbs) {
|
|
|
|
WRITE_ONCE(rdp->nocb_bypass_first, j);
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
|
|
|
|
}
|
|
|
|
rcu_nocb_bypass_unlock(rdp);
|
2024-08-13 22:56:42 +00:00
|
|
|
|
2022-10-16 16:22:54 +00:00
|
|
|
// A wake up of the grace period kthread or timer adjustment
|
|
|
|
// needs to be done only if:
|
|
|
|
// 1. Bypass list was fully empty before (this is the first
|
|
|
|
// bypass list entry), or:
|
|
|
|
// 2. Both of these conditions are met:
|
|
|
|
// a. The bypass list previously had only lazy CBs, and:
|
|
|
|
// b. The new CB is non-lazy.
|
2024-01-09 22:24:00 +00:00
|
|
|
if (!ncbs || (bypass_is_lazy && !lazy)) {
|
2021-05-19 00:09:28 +00:00
|
|
|
// No-CBs GP kthread might be indefinitely asleep, if so, wake.
|
|
|
|
rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
|
|
|
|
if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
|
|
|
|
TPS("FirstBQwake"));
|
|
|
|
__call_rcu_nocb_wake(rdp, true, flags);
|
|
|
|
} else {
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
|
|
|
|
TPS("FirstBQnoWake"));
|
2024-01-09 22:24:00 +00:00
|
|
|
rcu_nocb_unlock(rdp);
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return true; // Callback already enqueued.
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Awaken the no-CBs grace-period kthread if needed, either due to it
|
|
|
|
* legitimately being asleep or due to overload conditions.
|
|
|
|
*
|
|
|
|
* If warranted, also wake up the kthread servicing this CPU's queues.
|
|
|
|
*/
|
|
|
|
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
|
|
|
|
unsigned long flags)
|
|
|
|
__releases(rdp->nocb_lock)
|
|
|
|
{
|
2022-10-16 16:22:54 +00:00
|
|
|
long bypass_len;
|
2021-05-19 00:09:28 +00:00
|
|
|
unsigned long cur_gp_seq;
|
|
|
|
unsigned long j;
|
2022-10-16 16:22:54 +00:00
|
|
|
long lazy_len;
|
2021-05-19 00:09:28 +00:00
|
|
|
long len;
|
|
|
|
struct task_struct *t;
|
2024-01-17 10:26:16 +00:00
|
|
|
struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
|
2021-05-19 00:09:28 +00:00
|
|
|
|
|
|
|
// If we are being polled or there is no kthread, just leave.
|
|
|
|
t = READ_ONCE(rdp->nocb_gp_kthread);
|
|
|
|
if (rcu_nocb_poll || !t) {
|
2024-01-09 22:24:00 +00:00
|
|
|
rcu_nocb_unlock(rdp);
|
2021-05-19 00:09:28 +00:00
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
|
|
|
|
TPS("WakeNotPoll"));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
// Need to actually do a wakeup.
|
|
|
|
len = rcu_segcblist_n_cbs(&rdp->cblist);
|
2022-10-16 16:22:54 +00:00
|
|
|
bypass_len = rcu_cblist_n_cbs(&rdp->nocb_bypass);
|
|
|
|
lazy_len = READ_ONCE(rdp->lazy_len);
|
2021-05-19 00:09:28 +00:00
|
|
|
if (was_alldone) {
|
|
|
|
rdp->qlen_last_fqs_check = len;
|
2022-10-16 16:22:54 +00:00
|
|
|
// Only lazy CBs in bypass list
|
|
|
|
if (lazy_len && bypass_len == lazy_len) {
|
2024-01-09 22:24:00 +00:00
|
|
|
rcu_nocb_unlock(rdp);
|
2022-10-16 16:22:54 +00:00
|
|
|
wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_LAZY,
|
|
|
|
TPS("WakeLazy"));
|
2024-10-10 16:36:09 +00:00
|
|
|
} else if (!irqs_disabled_flags(flags) && cpu_online(rdp->cpu)) {
|
2021-05-19 00:09:28 +00:00
|
|
|
/* ... if queue was empty ... */
|
2024-01-09 22:24:00 +00:00
|
|
|
rcu_nocb_unlock(rdp);
|
2021-05-19 00:09:28 +00:00
|
|
|
wake_nocb_gp(rdp, false);
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
|
|
|
|
TPS("WakeEmpty"));
|
|
|
|
} else {
|
2024-10-10 16:36:09 +00:00
|
|
|
/*
|
|
|
|
* Don't do the wake-up upfront on fragile paths.
|
|
|
|
* Also offline CPUs can't call swake_up_one_online() from
|
|
|
|
* (soft-)IRQs. Rely on the final deferred wake-up from
|
|
|
|
* rcutree_report_cpu_dead()
|
|
|
|
*/
|
2024-01-09 22:24:00 +00:00
|
|
|
rcu_nocb_unlock(rdp);
|
2021-05-19 00:09:28 +00:00
|
|
|
wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
|
|
|
|
TPS("WakeEmptyIsDeferred"));
|
|
|
|
}
|
|
|
|
} else if (len > rdp->qlen_last_fqs_check + qhimark) {
|
|
|
|
/* ... or if many callbacks queued. */
|
|
|
|
rdp->qlen_last_fqs_check = len;
|
|
|
|
j = jiffies;
|
|
|
|
if (j != rdp->nocb_gp_adv_time &&
|
|
|
|
rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
|
|
|
|
rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
|
|
|
|
rcu_advance_cbs_nowake(rdp->mynode, rdp);
|
|
|
|
rdp->nocb_gp_adv_time = j;
|
|
|
|
}
|
|
|
|
smp_mb(); /* Enqueue before timer_pending(). */
|
|
|
|
if ((rdp->nocb_cb_sleep ||
|
|
|
|
!rcu_segcblist_ready_cbs(&rdp->cblist)) &&
|
2024-01-17 10:26:16 +00:00
|
|
|
!timer_pending(&rdp_gp->nocb_timer)) {
|
2024-01-09 22:24:00 +00:00
|
|
|
rcu_nocb_unlock(rdp);
|
2021-05-19 00:09:28 +00:00
|
|
|
wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
|
|
|
|
TPS("WakeOvfIsDeferred"));
|
|
|
|
} else {
|
2024-01-09 22:24:00 +00:00
|
|
|
rcu_nocb_unlock(rdp);
|
2021-05-19 00:09:28 +00:00
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
|
|
|
|
}
|
|
|
|
} else {
|
2024-01-09 22:24:00 +00:00
|
|
|
rcu_nocb_unlock(rdp);
|
2021-05-19 00:09:28 +00:00
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-01-09 22:24:01 +00:00
|
|
|
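/*
 * Enqueue the specified callback for this offloaded CPU, either onto the
 * ->nocb_bypass list or directly onto ->cblist, waking the no-CBs GP
 * kthread if required.
 */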
static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
|
|
|
|
rcu_callback_t func, unsigned long flags, bool lazy)
|
|
|
|
{
|
|
|
|
bool was_alldone;
|
|
|
|
|
|
|
|
if (!rcu_nocb_try_bypass(rdp, head, &was_alldone, flags, lazy)) {
|
|
|
|
/* Not enqueued on bypass but locked, do regular enqueue */
|
|
|
|
rcutree_enqueue(rdp, head, func);
|
|
|
|
__call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-07-03 22:56:40 +00:00
|
|
|
static void nocb_gp_toggle_rdp(struct rcu_data *rdp_gp, struct rcu_data *rdp)
|
2021-05-19 00:09:28 +00:00
|
|
|
{
|
|
|
|
struct rcu_segcblist *cblist = &rdp->cblist;
|
2022-04-19 12:23:18 +00:00
|
|
|
unsigned long flags;
|
2021-05-19 00:09:28 +00:00
|
|
|
|
2024-07-03 22:56:40 +00:00
|
|
|
/*
|
|
|
|
* Locking orders future de-offloaded callback enqueues against previous
* handling of this rdp. I.e., make sure rcuog is done with this rdp before
* de-offloaded callbacks can be enqueued.
|
|
|
|
*/
|
|
|
|
raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
|
|
|
|
if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
|
2022-04-19 12:23:18 +00:00
|
|
|
/*
|
|
|
|
* Offloading. Set our flag and notify the offload worker.
|
|
|
|
* We will handle this rdp until it ever gets de-offloaded.
|
|
|
|
*/
|
2024-07-03 22:56:40 +00:00
|
|
|
list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
|
|
|
|
rcu_segcblist_set_flags(cblist, SEGCBLIST_OFFLOADED);
|
|
|
|
} else {
|
2022-04-19 12:23:18 +00:00
|
|
|
/*
|
|
|
|
* De-offloading. Clear our flag and notify the de-offload worker.
|
|
|
|
* We will ignore this rdp until it ever gets re-offloaded.
|
|
|
|
*/
|
2024-07-03 22:56:40 +00:00
|
|
|
list_del(&rdp->nocb_entry_rdp);
|
|
|
|
rcu_segcblist_clear_flags(cblist, SEGCBLIST_OFFLOADED);
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
2024-07-03 22:56:40 +00:00
|
|
|
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
|
2022-04-19 12:23:18 +00:00
|
|
|
}
|
2021-05-19 00:09:28 +00:00
|
|
|
|
2022-06-11 11:00:44 +00:00
|
|
|
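/* Put the rcuog kthread to sleep until its ->nocb_gp_sleep flag is cleared. */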
static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu)
|
|
|
|
{
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
|
|
|
|
swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
|
|
|
|
!READ_ONCE(my_rdp->nocb_gp_sleep));
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
|
|
|
|
}
|
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
/*
|
|
|
|
* No-CBs GP kthreads come here to wait for additional callbacks to show up
|
|
|
|
* or for grace periods to end.
|
|
|
|
*/
|
|
|
|
static void nocb_gp_wait(struct rcu_data *my_rdp)
|
|
|
|
{
|
|
|
|
bool bypass = false;
|
|
|
|
int __maybe_unused cpu = my_rdp->cpu;
|
|
|
|
unsigned long cur_gp_seq;
|
|
|
|
unsigned long flags;
|
|
|
|
bool gotcbs = false;
|
|
|
|
unsigned long j = jiffies;
|
2022-10-16 16:22:54 +00:00
|
|
|
bool lazy = false;
|
2021-05-19 00:09:28 +00:00
|
|
|
bool needwait_gp = false; // This prevents actual uninitialized use.
|
|
|
|
bool needwake;
|
|
|
|
bool needwake_gp;
|
2022-04-19 12:23:18 +00:00
|
|
|
struct rcu_data *rdp, *rdp_toggling = NULL;
|
2021-05-19 00:09:28 +00:00
|
|
|
struct rcu_node *rnp;
|
|
|
|
unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
|
|
|
|
bool wasempty = false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Each pass through the following loop checks for CBs and for the
|
|
|
|
* nearest grace period (if any) to wait for next. The CB kthreads
|
|
|
|
* and the global grace-period kthread are awakened if needed.
|
|
|
|
*/
|
|
|
|
WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
|
2021-11-23 00:37:03 +00:00
|
|
|
/*
|
|
|
|
* An rcu_data structure is removed from the list after its
|
|
|
|
* CPU is de-offloaded and added to the list before that CPU is
|
|
|
|
* (re-)offloaded. If the following loop happens to be referencing
|
|
|
|
* that rcu_data structure during the time that the corresponding
|
|
|
|
* CPU is de-offloaded and then immediately re-offloaded, this
|
|
|
|
* loop's rdp pointer will be carried to the end of the list by
|
|
|
|
* the resulting pair of list operations. This can cause the loop
|
|
|
|
* to skip over some of the rcu_data structures that were supposed
|
|
|
|
* to have been scanned. Fortunately a new iteration through the
|
|
|
|
* entire loop is forced after a given CPU's rcu_data structure
|
|
|
|
* is added to the list, so the skipped-over rcu_data structures
|
|
|
|
* won't be ignored for long.
|
|
|
|
*/
|
2022-04-19 12:23:18 +00:00
|
|
|
list_for_each_entry(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp) {
|
2022-10-16 16:22:54 +00:00
|
|
|
long bypass_ncbs;
|
|
|
|
bool flush_bypass = false;
|
|
|
|
long lazy_ncbs;
|
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
|
|
|
|
rcu_nocb_lock_irqsave(rdp, flags);
|
2022-04-19 12:23:18 +00:00
|
|
|
lockdep_assert_held(&rdp->nocb_lock);
|
2021-05-19 00:09:28 +00:00
|
|
|
bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
|
2022-10-16 16:22:54 +00:00
|
|
|
lazy_ncbs = READ_ONCE(rdp->lazy_len);
|
|
|
|
|
|
|
|
if (bypass_ncbs && (lazy_ncbs == bypass_ncbs) &&
|
2023-11-15 19:11:26 +00:00
|
|
|
(time_after(j, READ_ONCE(rdp->nocb_bypass_first) + rcu_get_jiffies_lazy_flush()) ||
|
2022-10-16 16:22:54 +00:00
|
|
|
bypass_ncbs > 2 * qhimark)) {
|
|
|
|
flush_bypass = true;
|
|
|
|
} else if (bypass_ncbs && (lazy_ncbs != bypass_ncbs) &&
|
2021-05-19 00:09:28 +00:00
|
|
|
(time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
|
|
|
|
bypass_ncbs > 2 * qhimark)) {
|
2022-10-16 16:22:54 +00:00
|
|
|
flush_bypass = true;
|
2021-05-19 00:09:28 +00:00
|
|
|
} else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
|
|
|
|
rcu_nocb_unlock_irqrestore(rdp, flags);
|
|
|
|
continue; /* No callbacks here, try next. */
|
|
|
|
}
|
2022-10-16 16:22:54 +00:00
|
|
|
|
|
|
|
if (flush_bypass) {
|
|
|
|
// Bypass full or old, so flush it.
|
|
|
|
(void)rcu_nocb_try_flush_bypass(rdp, j);
|
|
|
|
bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
|
|
|
|
lazy_ncbs = READ_ONCE(rdp->lazy_len);
|
|
|
|
}
|
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
if (bypass_ncbs) {
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
|
2022-10-16 16:22:54 +00:00
|
|
|
bypass_ncbs == lazy_ncbs ? TPS("Lazy") : TPS("Bypass"));
|
|
|
|
if (bypass_ncbs == lazy_ncbs)
|
|
|
|
lazy = true;
|
|
|
|
else
|
|
|
|
bypass = true;
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
rnp = rdp->mynode;
|
|
|
|
|
|
|
|
// Advance callbacks if helpful and low contention.
|
|
|
|
needwake_gp = false;
|
|
|
|
if (!rcu_segcblist_restempty(&rdp->cblist,
|
|
|
|
RCU_NEXT_READY_TAIL) ||
|
|
|
|
(rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
|
|
|
|
rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
|
|
|
|
raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
|
|
|
|
needwake_gp = rcu_advance_cbs(rnp, rdp);
|
|
|
|
wasempty = rcu_segcblist_restempty(&rdp->cblist,
|
|
|
|
RCU_NEXT_READY_TAIL);
|
|
|
|
raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
|
|
|
|
}
|
|
|
|
// Need to wait on some grace period?
|
|
|
|
WARN_ON_ONCE(wasempty &&
|
|
|
|
!rcu_segcblist_restempty(&rdp->cblist,
|
|
|
|
RCU_NEXT_READY_TAIL));
|
|
|
|
if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
|
|
|
|
if (!needwait_gp ||
|
|
|
|
ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
|
|
|
|
wait_gp_seq = cur_gp_seq;
|
|
|
|
needwait_gp = true;
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
|
|
|
|
TPS("NeedWaitGP"));
|
|
|
|
}
|
|
|
|
if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
|
|
|
|
needwake = rdp->nocb_cb_sleep;
|
|
|
|
WRITE_ONCE(rdp->nocb_cb_sleep, false);
|
|
|
|
} else {
|
|
|
|
needwake = false;
|
|
|
|
}
|
|
|
|
rcu_nocb_unlock_irqrestore(rdp, flags);
|
|
|
|
if (needwake) {
|
|
|
|
swake_up_one(&rdp->nocb_cb_wq);
|
|
|
|
gotcbs = true;
|
|
|
|
}
|
|
|
|
if (needwake_gp)
|
|
|
|
rcu_gp_kthread_wake();
|
|
|
|
}
|
|
|
|
|
|
|
|
my_rdp->nocb_gp_bypass = bypass;
|
|
|
|
my_rdp->nocb_gp_gp = needwait_gp;
|
|
|
|
my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
|
|
|
|
|
2022-10-16 16:22:54 +00:00
|
|
|
// At least one child with non-empty ->nocb_bypass, so set
|
|
|
|
// timer in order to avoid stranding its callbacks.
|
|
|
|
if (!rcu_nocb_poll) {
|
|
|
|
// If the bypass list has only lazy CBs, add a deferred lazy wake up.
|
|
|
|
if (lazy && !bypass) {
|
|
|
|
wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_LAZY,
|
|
|
|
TPS("WakeLazyIsDeferred"));
|
|
|
|
// Otherwise add a deferred bypass wake up.
|
|
|
|
} else if (bypass) {
|
|
|
|
wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
|
|
|
|
TPS("WakeBypassIsDeferred"));
|
|
|
|
}
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
2022-10-16 16:22:54 +00:00
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
if (rcu_nocb_poll) {
|
|
|
|
/* Polling, so trace if first poll in the series. */
|
|
|
|
if (gotcbs)
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
|
2022-06-11 11:00:44 +00:00
|
|
|
if (list_empty(&my_rdp->nocb_head_rdp)) {
|
|
|
|
raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
|
|
|
|
if (!my_rdp->nocb_toggling_rdp)
|
|
|
|
WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
|
|
|
|
raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
|
|
|
|
/* Wait for any offloading rdp */
|
|
|
|
nocb_gp_sleep(my_rdp, cpu);
|
|
|
|
} else {
|
|
|
|
schedule_timeout_idle(1);
|
|
|
|
}
|
2021-05-19 00:09:28 +00:00
|
|
|
} else if (!needwait_gp) {
|
|
|
|
/* Wait for callbacks to appear. */
|
2022-06-11 11:00:44 +00:00
|
|
|
nocb_gp_sleep(my_rdp, cpu);
|
2021-05-19 00:09:28 +00:00
|
|
|
} else {
|
|
|
|
rnp = my_rdp->mynode;
|
|
|
|
trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
|
|
|
|
swait_event_interruptible_exclusive(
|
|
|
|
rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
|
|
|
|
rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
|
|
|
|
!READ_ONCE(my_rdp->nocb_gp_sleep));
|
|
|
|
trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
|
|
|
|
}
|
2022-04-19 12:23:18 +00:00
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
if (!rcu_nocb_poll) {
|
|
|
|
raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
|
2022-04-19 12:23:18 +00:00
|
|
|
// (De-)queue an rdp to/from the group if its nocb state is changing
|
|
|
|
rdp_toggling = my_rdp->nocb_toggling_rdp;
|
|
|
|
if (rdp_toggling)
|
|
|
|
my_rdp->nocb_toggling_rdp = NULL;
|
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
|
|
|
|
WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
|
|
|
|
del_timer(&my_rdp->nocb_timer);
|
|
|
|
}
|
|
|
|
WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
|
|
|
|
raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
|
2022-04-19 12:23:18 +00:00
|
|
|
} else {
|
|
|
|
rdp_toggling = READ_ONCE(my_rdp->nocb_toggling_rdp);
|
|
|
|
if (rdp_toggling) {
|
|
|
|
/*
|
|
|
|
* Paranoid locking to make sure nocb_toggling_rdp is well
|
|
|
|
* reset *before* we (re)set SEGCBLIST_KTHREAD_GP or we could
|
|
|
|
* race with another round of nocb toggling for this rdp.
|
|
|
|
* Nocb locking should already prevent that, but we stick
* to paranoia, especially in this rare path.
|
|
|
|
*/
|
|
|
|
raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
|
|
|
|
my_rdp->nocb_toggling_rdp = NULL;
|
|
|
|
raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rdp_toggling) {
|
2024-07-03 22:56:40 +00:00
|
|
|
nocb_gp_toggle_rdp(my_rdp, rdp_toggling);
|
2024-04-25 14:18:34 +00:00
|
|
|
swake_up_one(&rdp_toggling->nocb_state_wq);
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
2022-04-19 12:23:18 +00:00
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
my_rdp->nocb_gp_seq = -1;
|
|
|
|
WARN_ON(signal_pending(current));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* No-CBs grace-period-wait kthread. There is one of these per group
|
|
|
|
* of CPUs, but only once at least one CPU in that group has come online
|
|
|
|
* at least once since boot. This kthread checks for newly posted
|
|
|
|
* callbacks from any of the CPUs it is responsible for, waits for a
|
|
|
|
* grace period, then awakens all of the rcu_nocb_cb_kthread() instances
|
|
|
|
* that then have callback-invocation work to do.
|
|
|
|
*/
|
|
|
|
static int rcu_nocb_gp_kthread(void *arg)
|
|
|
|
{
|
|
|
|
struct rcu_data *rdp = arg;
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
|
|
|
|
nocb_gp_wait(rdp);
|
|
|
|
cond_resched_tasks_rcu_qs();
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
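/* Should the rcuoc kthread stop waiting, either to run callbacks or to park? */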
static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
|
|
|
|
{
|
2024-04-25 14:18:34 +00:00
|
|
|
return !READ_ONCE(rdp->nocb_cb_sleep) || kthread_should_park();
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Invoke any ready callbacks from the corresponding no-CBs CPU,
|
|
|
|
* then, if there are no more, wait for more to appear.
|
|
|
|
*/
|
|
|
|
static void nocb_cb_wait(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
struct rcu_segcblist *cblist = &rdp->cblist;
|
|
|
|
unsigned long cur_gp_seq;
|
|
|
|
unsigned long flags;
|
|
|
|
bool needwake_gp = false;
|
|
|
|
struct rcu_node *rnp = rdp->mynode;
|
|
|
|
|
2024-04-25 14:18:34 +00:00
|
|
|
swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
|
|
|
|
nocb_cb_wait_cond(rdp));
|
|
|
|
if (kthread_should_park()) {
|
2024-10-22 03:41:17 +00:00
|
|
|
/*
|
|
|
|
* kthread_park() must be preceded by an rcu_barrier().
|
|
|
|
* But yet another rcu_barrier() might have sneaked in between
|
|
|
|
* the barrier callback execution and the callbacks counter
|
|
|
|
* decrement.
|
|
|
|
*/
|
|
|
|
if (rdp->nocb_cb_sleep) {
|
|
|
|
rcu_nocb_lock_irqsave(rdp, flags);
|
|
|
|
WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
|
|
|
|
rcu_nocb_unlock_irqrestore(rdp, flags);
|
|
|
|
kthread_parkme();
|
|
|
|
}
|
2024-04-25 14:18:34 +00:00
|
|
|
} else if (READ_ONCE(rdp->nocb_cb_sleep)) {
|
|
|
|
WARN_ON(signal_pending(current));
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
|
|
|
|
}
|
2021-11-23 00:37:04 +00:00
|
|
|
|
2024-04-25 14:18:34 +00:00
|
|
|
WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
|
2021-11-23 00:37:04 +00:00
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
local_irq_save(flags);
|
2024-04-29 14:43:36 +00:00
|
|
|
rcu_momentary_eqs();
|
2021-05-19 00:09:28 +00:00
|
|
|
local_irq_restore(flags);
|
|
|
|
/*
|
|
|
|
* Disable BH to provide the expected environment. Also, when
|
|
|
|
* transitioning to/from NOCB mode, a self-requeuing callback might
|
|
|
|
* be invoked from softirq. A short grace period could cause both
* instances of this callback to execute concurrently.
|
|
|
|
*/
|
|
|
|
local_bh_disable();
|
|
|
|
rcu_do_batch(rdp);
|
|
|
|
local_bh_enable();
|
|
|
|
lockdep_assert_irqs_enabled();
|
|
|
|
rcu_nocb_lock_irqsave(rdp, flags);
|
|
|
|
if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
|
|
|
|
rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
|
|
|
|
raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
|
|
|
|
needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
|
|
|
|
raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
|
|
|
|
}
|
|
|
|
|
2024-04-25 14:18:34 +00:00
|
|
|
if (!rcu_segcblist_ready_cbs(cblist)) {
|
|
|
|
WRITE_ONCE(rdp->nocb_cb_sleep, true);
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
|
2021-05-19 00:09:28 +00:00
|
|
|
} else {
|
2024-04-25 14:18:34 +00:00
|
|
|
WRITE_ONCE(rdp->nocb_cb_sleep, false);
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
rcu_nocb_unlock_irqrestore(rdp, flags);
|
|
|
|
if (needwake_gp)
|
|
|
|
rcu_gp_kthread_wake();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Per-rcu_data kthread, but only for no-CBs CPUs. Repeatedly invoke
|
|
|
|
* nocb_cb_wait() to do the dirty work.
|
|
|
|
*/
|
|
|
|
static int rcu_nocb_cb_kthread(void *arg)
|
|
|
|
{
|
|
|
|
struct rcu_data *rdp = arg;
|
|
|
|
|
|
|
|
// Each pass through this loop does one callback batch, and,
|
|
|
|
// if there are no more ready callbacks, waits for them.
|
|
|
|
for (;;) {
|
|
|
|
nocb_cb_wait(rdp);
|
|
|
|
cond_resched_tasks_rcu_qs();
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Is a deferred wakeup of rcu_nocb_kthread() required? */
|
|
|
|
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
|
|
|
|
{
|
|
|
|
return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Do a deferred wakeup of rcu_nocb_kthread(). */
|
|
|
|
static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
|
|
|
|
struct rcu_data *rdp, int level,
|
|
|
|
unsigned long flags)
|
|
|
|
__releases(rdp_gp->nocb_gp_lock)
|
|
|
|
{
|
|
|
|
int ndw;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
|
|
|
|
raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
ndw = rdp_gp->nocb_defer_wakeup;
|
|
|
|
ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
|
|
|
|
static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
|
|
|
|
|
|
|
|
WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
|
|
|
|
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
|
|
|
|
|
|
|
|
raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
|
|
|
|
smp_mb__after_spinlock(); /* Timer expire before wakeup. */
|
|
|
|
do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
|
|
|
|
* This means we do an inexact common-case check. Note that if
|
|
|
|
* we miss, ->nocb_timer will eventually clean things up.
|
|
|
|
*/
|
|
|
|
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
|
|
|
|
|
|
|
|
if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
|
|
|
|
return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
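/* Carry out any deferred no-CBs wakeups pending for the current CPU. */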
void rcu_nocb_flush_deferred_wakeup(void)
|
|
|
|
{
|
|
|
|
do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
|
|
|
|
|
2024-07-03 22:56:40 +00:00
|
|
|
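/*
 * Queue this rcu_data structure for (de-)offloading handling by its rcuog
 * kthread and report whether that kthread needs to be awakened.
 */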
static int rcu_nocb_queue_toggle_rdp(struct rcu_data *rdp)
|
2021-05-19 00:09:28 +00:00
|
|
|
{
|
|
|
|
struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
|
|
|
|
bool wake_gp = false;
|
2024-07-03 22:56:40 +00:00
|
|
|
unsigned long flags;
|
2021-05-19 00:09:28 +00:00
|
|
|
|
|
|
|
raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
|
2022-04-19 12:23:18 +00:00
|
|
|
// Queue this rdp for add/del to/from the list to iterate on rcuog
|
|
|
|
WRITE_ONCE(rdp_gp->nocb_toggling_rdp, rdp);
|
2021-05-19 00:09:28 +00:00
|
|
|
if (rdp_gp->nocb_gp_sleep) {
|
|
|
|
rdp_gp->nocb_gp_sleep = false;
|
|
|
|
wake_gp = true;
|
|
|
|
}
|
|
|
|
raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
|
|
|
|
|
2022-04-19 12:23:20 +00:00
|
|
|
return wake_gp;
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
|
2024-07-03 22:56:40 +00:00
|
|
|
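/* Has the rcuog kthread finished de-offloading this rcu_data structure? */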
static bool rcu_nocb_rdp_deoffload_wait_cond(struct rcu_data *rdp)
|
2021-05-19 00:09:28 +00:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
2024-07-03 22:56:40 +00:00
|
|
|
bool ret;
|
2021-05-19 00:09:28 +00:00
|
|
|
|
2022-04-19 12:23:20 +00:00
|
|
|
/*
|
2024-07-03 22:56:40 +00:00
|
|
|
* Locking makes sure rcuog is done handling this rdp before de-offloaded
* enqueues can happen. It also keeps the SEGCBLIST_OFFLOADED flag stable
* while the ->nocb_lock is held.
|
2022-04-19 12:23:20 +00:00
|
|
|
*/
|
2024-07-03 22:56:40 +00:00
|
|
|
raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
|
|
|
|
ret = !rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
|
|
|
|
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2024-05-30 13:45:46 +00:00
|
|
|
static int rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
|
2021-05-19 00:09:28 +00:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
2022-04-19 12:23:20 +00:00
|
|
|
int wake_gp;
|
|
|
|
struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
|
2021-05-19 00:09:28 +00:00
|
|
|
|
2024-05-30 13:45:46 +00:00
|
|
|
/* CPU must be offline, unless it's early boot */
|
|
|
|
WARN_ON_ONCE(cpu_online(rdp->cpu) && rdp->cpu != raw_smp_processor_id());
|
2021-05-19 00:09:28 +00:00
|
|
|
|
|
|
|
pr_info("De-offloading %d\n", rdp->cpu);
|
|
|
|
|
2024-05-30 13:45:46 +00:00
|
|
|
/* Flush all callbacks from segcblist and bypass */
|
|
|
|
rcu_barrier();
|
|
|
|
|
2021-10-19 00:08:08 +00:00
|
|
|
/*
|
2024-07-03 22:56:40 +00:00
|
|
|
* Make sure the rcuoc kthread isn't in the middle of a nocb locked
|
|
|
|
* sequence while offloading is deactivated, along with nocb locking.
|
2021-10-19 00:08:08 +00:00
|
|
|
*/
|
2024-07-03 22:56:40 +00:00
|
|
|
if (rdp->nocb_cb_kthread)
|
|
|
|
kthread_park(rdp->nocb_cb_kthread);
|
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
rcu_nocb_lock_irqsave(rdp, flags);
|
2024-05-30 13:45:46 +00:00
|
|
|
WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
|
|
|
|
WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
|
2024-07-03 22:56:40 +00:00
|
|
|
rcu_nocb_unlock_irqrestore(rdp, flags);
|
2024-05-30 13:45:46 +00:00
|
|
|
|
2024-07-03 22:56:40 +00:00
|
|
|
wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
|
2022-04-19 12:23:20 +00:00
|
|
|
|
|
|
|
mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
|
2024-07-03 22:56:40 +00:00
|
|
|
|
2022-04-19 12:23:20 +00:00
|
|
|
if (rdp_gp->nocb_gp_kthread) {
|
|
|
|
if (wake_gp)
|
|
|
|
wake_up_process(rdp_gp->nocb_gp_kthread);
|
|
|
|
|
|
|
|
swait_event_exclusive(rdp->nocb_state_wq,
|
2024-07-03 22:56:40 +00:00
|
|
|
rcu_nocb_rdp_deoffload_wait_cond(rdp));
|
2022-04-19 12:23:20 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* No kthread to clear the flags for us or remove the rdp from the nocb list
|
|
|
|
* to iterate. Do it here instead. Locking doesn't look strictly necessary
|
|
|
|
* but we stick to paranoia in this rare path.
|
|
|
|
*/
|
2024-07-03 22:56:40 +00:00
|
|
|
raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
|
|
|
|
rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
|
|
|
|
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
|
2022-04-19 12:23:20 +00:00
|
|
|
|
|
|
|
list_del(&rdp->nocb_entry_rdp);
|
|
|
|
}
|
2021-05-19 00:09:28 +00:00
|
|
|
|
2024-07-03 22:56:40 +00:00
|
|
|
mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
|
2021-05-19 00:09:28 +00:00
|
|
|
|
2022-04-19 12:23:20 +00:00
|
|
|
return 0;
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
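/*
 * De-offload callback processing for the specified CPU, which must not be
 * online.  Returns 0 on success (or if the CPU was not offloaded) and
 * -EINVAL if the CPU is currently online.
 */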
int rcu_nocb_cpu_deoffload(int cpu)
|
|
|
|
{
|
|
|
|
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
cpus_read_lock();
|
2024-05-30 13:45:45 +00:00
|
|
|
mutex_lock(&rcu_state.nocb_mutex);
|
2021-05-19 00:09:28 +00:00
|
|
|
if (rcu_rdp_is_offloaded(rdp)) {
|
2024-05-30 13:45:46 +00:00
|
|
|
if (!cpu_online(cpu)) {
|
|
|
|
ret = rcu_nocb_rdp_deoffload(rdp);
|
2021-05-19 00:09:28 +00:00
|
|
|
if (!ret)
|
|
|
|
cpumask_clear_cpu(cpu, rcu_nocb_mask);
|
|
|
|
} else {
|
2024-05-30 13:45:46 +00:00
|
|
|
pr_info("NOCB: Cannot CB-deoffload online CPU %d\n", rdp->cpu);
|
2021-05-19 00:09:28 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
2024-05-30 13:45:45 +00:00
|
|
|
mutex_unlock(&rcu_state.nocb_mutex);
|
2022-04-19 12:23:19 +00:00
|
|
|
cpus_read_unlock();
|
2021-05-19 00:09:28 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
|
|
|
|
|
2024-07-03 22:56:40 +00:00
|
|
|
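/* Has the rcuog kthread finished offloading this rcu_data structure? */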
static bool rcu_nocb_rdp_offload_wait_cond(struct rcu_data *rdp)
|
2021-05-19 00:09:28 +00:00
|
|
|
{
|
|
|
|
unsigned long flags;
|
2024-07-03 22:56:40 +00:00
|
|
|
bool ret;
|
|
|
|
|
|
|
|
raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
|
|
|
|
ret = rcu_segcblist_test_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
|
|
|
|
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int rcu_nocb_rdp_offload(struct rcu_data *rdp)
|
|
|
|
{
|
2022-04-19 12:23:20 +00:00
|
|
|
int wake_gp;
|
|
|
|
struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
|
2021-05-19 00:09:28 +00:00
|
|
|
|
2024-05-30 13:45:46 +00:00
|
|
|
WARN_ON_ONCE(cpu_online(rdp->cpu));
|
2021-05-19 00:09:28 +00:00
|
|
|
/*
|
|
|
|
* For now we only support re-offload, ie: the rdp must have been
|
|
|
|
* offloaded on boot first.
|
|
|
|
*/
|
|
|
|
if (!rdp->nocb_gp_rdp)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2022-04-19 12:23:20 +00:00
|
|
|
if (WARN_ON_ONCE(!rdp_gp->nocb_gp_kthread))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
pr_info("Offloading %d\n", rdp->cpu);
|
2021-11-23 00:37:03 +00:00
|
|
|
|
2024-05-30 13:45:46 +00:00
|
|
|
WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
|
|
|
|
WARN_ON_ONCE(rcu_segcblist_n_cbs(&rdp->cblist));
|
2021-05-19 00:09:28 +00:00
|
|
|
|
2024-07-03 22:56:40 +00:00
|
|
|
wake_gp = rcu_nocb_queue_toggle_rdp(rdp);
|
2022-04-19 12:23:20 +00:00
|
|
|
if (wake_gp)
|
|
|
|
wake_up_process(rdp_gp->nocb_gp_kthread);
|
2024-04-25 14:18:34 +00:00
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
swait_event_exclusive(rdp->nocb_state_wq,
|
2024-07-03 22:56:40 +00:00
|
|
|
rcu_nocb_rdp_offload_wait_cond(rdp));
|
2021-05-19 00:09:28 +00:00
|
|
|
|
2024-07-03 22:56:40 +00:00
|
|
|
kthread_unpark(rdp->nocb_cb_kthread);
|
2021-10-19 00:08:07 +00:00
|
|
|
|
2022-04-19 12:23:20 +00:00
|
|
|
return 0;
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
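/*
 * Offload callback processing for the specified CPU, which must not be
 * online.  Returns 0 on success (or if the CPU was already offloaded) and
 * -EINVAL if the CPU is currently online.
 */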
int rcu_nocb_cpu_offload(int cpu)
|
|
|
|
{
|
|
|
|
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
cpus_read_lock();
|
2024-05-30 13:45:45 +00:00
|
|
|
mutex_lock(&rcu_state.nocb_mutex);
|
2021-05-19 00:09:28 +00:00
|
|
|
if (!rcu_rdp_is_offloaded(rdp)) {
|
2024-05-30 13:45:46 +00:00
|
|
|
if (!cpu_online(cpu)) {
|
|
|
|
ret = rcu_nocb_rdp_offload(rdp);
|
2021-05-19 00:09:28 +00:00
|
|
|
if (!ret)
|
|
|
|
cpumask_set_cpu(cpu, rcu_nocb_mask);
|
|
|
|
} else {
|
2024-05-30 13:45:46 +00:00
|
|
|
pr_info("NOCB: Cannot CB-offload online CPU %d\n", rdp->cpu);
|
2021-05-19 00:09:28 +00:00
|
|
|
ret = -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
2024-05-30 13:45:45 +00:00
|
|
|
mutex_unlock(&rcu_state.nocb_mutex);
|
2022-04-19 12:23:19 +00:00
|
|
|
cpus_read_unlock();
|
2021-05-19 00:09:28 +00:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
|
|
|
|
|
2023-01-30 07:17:37 +00:00
|
|
|
#ifdef CONFIG_RCU_LAZY
|
2022-10-16 16:22:56 +00:00
|
|
|
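/* Shrinker callback: report the number of lazy callbacks queued across all no-CBs CPUs. */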
static unsigned long
|
|
|
|
lazy_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
unsigned long count = 0;
|
|
|
|
|
2023-03-29 16:02:03 +00:00
|
|
|
if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Protect rcu_nocb_mask against concurrent (de-)offloading. */
|
2024-05-30 13:45:45 +00:00
|
|
|
if (!mutex_trylock(&rcu_state.nocb_mutex))
|
2023-03-29 16:02:03 +00:00
|
|
|
return 0;
|
|
|
|
|
2022-10-16 16:22:56 +00:00
|
|
|
/* Snapshot count of all CPUs */
|
2023-03-29 16:02:03 +00:00
|
|
|
for_each_cpu(cpu, rcu_nocb_mask) {
|
2022-10-16 16:22:56 +00:00
|
|
|
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
|
|
|
|
|
count += READ_ONCE(rdp->lazy_len);
|
|
|
|
}
|
|
|
|
|
2024-05-30 13:45:45 +00:00
|
|
|
mutex_unlock(&rcu_state.nocb_mutex);
|
2023-03-29 16:02:03 +00:00
|
|
|
|
2022-10-16 16:22:56 +00:00
|
|
|
return count ? count : SHRINK_EMPTY;
|
|
|
|
}
|
|
|
|
|
|
|
|
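/*
 * Shrinker callback: flush lazy callbacks from the no-CBs CPUs into their
 * main ->cblist and wake the corresponding rcuog kthreads so that the
 * memory held by those callbacks can be reclaimed sooner.
 */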
static unsigned long
|
|
|
|
lazy_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
unsigned long flags;
|
|
|
|
unsigned long count = 0;
|
|
|
|
|
2023-03-29 16:02:03 +00:00
|
|
|
if (WARN_ON_ONCE(!cpumask_available(rcu_nocb_mask)))
|
|
|
|
return 0;
|
2023-03-29 16:02:00 +00:00
|
|
|
/*
|
|
|
|
* Protect against concurrent (de-)offloading. Otherwise nocb locking
|
|
|
|
* may be ignored or imbalanced.
|
|
|
|
*/
|
2024-05-30 13:45:45 +00:00
|
|
|
if (!mutex_trylock(&rcu_state.nocb_mutex)) {
|
2023-03-29 16:02:00 +00:00
|
|
|
/*
|
2024-05-30 13:45:45 +00:00
|
|
|
* But really don't insist if nocb_mutex is contended since we
|
2023-03-29 16:02:00 +00:00
|
|
|
* can't guarantee that it will never engage in a dependency
|
|
|
|
* chain involving memory allocation. The lock is seldom contended
|
|
|
|
* anyway.
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-10-16 16:22:56 +00:00
|
|
|
/* Snapshot count of all CPUs */
|
2023-03-29 16:02:03 +00:00
|
|
|
for_each_cpu(cpu, rcu_nocb_mask) {
|
2022-10-16 16:22:56 +00:00
|
|
|
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
2023-03-29 16:02:00 +00:00
|
|
|
int _count;
|
|
|
|
|
2023-03-29 16:02:03 +00:00
|
|
|
if (WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp)))
|
2023-03-29 16:02:00 +00:00
|
|
|
continue;
|
|
|
|
|
2023-03-29 16:02:02 +00:00
|
|
|
if (!READ_ONCE(rdp->lazy_len))
|
2022-10-16 16:22:56 +00:00
|
|
|
continue;
|
2023-03-29 16:02:00 +00:00
|
|
|
|
2022-10-16 16:22:56 +00:00
|
|
|
rcu_nocb_lock_irqsave(rdp, flags);
|
2023-03-29 16:02:02 +00:00
|
|
|
/*
|
|
|
|
* Recheck under the nocb lock. Since the bypass lock is not held, we
|
|
|
|
* may still race with increments from the enqueuer, but we do know
|
|
|
|
* for sure whether at least one lazy callback is queued.
|
|
|
|
*/
|
|
|
|
_count = READ_ONCE(rdp->lazy_len);
|
|
|
|
if (!_count) {
|
|
|
|
rcu_nocb_unlock_irqrestore(rdp, flags);
|
|
|
|
continue;
|
|
|
|
}
|
2024-01-10 08:11:28 +00:00
|
|
|
rcu_nocb_try_flush_bypass(rdp, jiffies);
|
2022-10-16 16:22:56 +00:00
|
|
|
rcu_nocb_unlock_irqrestore(rdp, flags);
|
|
|
|
wake_nocb_gp(rdp, false);
|
|
|
|
sc->nr_to_scan -= _count;
|
|
|
|
count += _count;
|
|
|
|
if (sc->nr_to_scan <= 0)
|
|
|
|
break;
|
|
|
|
}
|
2023-03-29 16:02:00 +00:00
|
|
|
|
2024-05-30 13:45:45 +00:00
|
|
|
mutex_unlock(&rcu_state.nocb_mutex);
|
2023-03-29 16:02:00 +00:00
|
|
|
|
2022-10-16 16:22:56 +00:00
|
|
|
return count ? count : SHRINK_STOP;
|
|
|
|
}
|
2023-01-30 07:17:37 +00:00
|
|
|
#endif // #ifdef CONFIG_RCU_LAZY
|
2022-10-16 16:22:56 +00:00
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
void __init rcu_init_nohz(void)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
struct rcu_data *rdp;
|
2022-09-13 03:00:36 +00:00
|
|
|
const struct cpumask *cpumask = NULL;
|
2023-09-11 09:44:14 +00:00
|
|
|
struct shrinker * __maybe_unused lazy_rcu_shrinker;
|
2022-04-22 17:52:47 +00:00
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
#if defined(CONFIG_NO_HZ_FULL)
|
2022-09-13 03:00:36 +00:00
|
|
|
if (tick_nohz_full_running && !cpumask_empty(tick_nohz_full_mask))
|
|
|
|
cpumask = tick_nohz_full_mask;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_DEFAULT_ALL) &&
|
|
|
|
!rcu_state.nocb_is_setup && !cpumask)
|
|
|
|
cpumask = cpu_possible_mask;
|
2021-05-19 00:09:28 +00:00
|
|
|
|
2022-09-13 03:00:36 +00:00
|
|
|
if (cpumask) {
|
2021-11-23 00:37:05 +00:00
|
|
|
if (!cpumask_available(rcu_nocb_mask)) {
|
|
|
|
if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
|
|
|
|
pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
|
|
|
|
return;
|
|
|
|
}
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
2022-09-13 03:00:36 +00:00
|
|
|
|
|
|
|
cpumask_or(rcu_nocb_mask, rcu_nocb_mask, cpumask);
|
2022-02-14 13:23:39 +00:00
|
|
|
rcu_state.nocb_is_setup = true;
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
2021-11-23 00:37:05 +00:00
|
|
|
|
2022-02-14 13:23:39 +00:00
|
|
|
if (!rcu_state.nocb_is_setup)
|
2021-05-19 00:09:28 +00:00
|
|
|
return;
|
|
|
|
|
2023-01-30 07:17:37 +00:00
|
|
|
#ifdef CONFIG_RCU_LAZY
|
2023-09-11 09:44:14 +00:00
|
|
|
lazy_rcu_shrinker = shrinker_alloc(0, "rcu-lazy");
|
|
|
|
if (!lazy_rcu_shrinker) {
|
|
|
|
pr_err("Failed to allocate lazy_rcu shrinker!\n");
|
|
|
|
} else {
|
|
|
|
lazy_rcu_shrinker->count_objects = lazy_rcu_shrink_count;
|
|
|
|
lazy_rcu_shrinker->scan_objects = lazy_rcu_shrink_scan;
|
|
|
|
|
|
|
|
shrinker_register(lazy_rcu_shrinker);
|
|
|
|
}
|
2023-01-30 07:17:37 +00:00
|
|
|
#endif // #ifdef CONFIG_RCU_LAZY
|
2022-10-16 16:22:56 +00:00
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
|
|
|
|
pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
|
|
|
|
cpumask_and(rcu_nocb_mask, cpu_possible_mask,
|
|
|
|
rcu_nocb_mask);
|
|
|
|
}
|
|
|
|
if (cpumask_empty(rcu_nocb_mask))
|
|
|
|
pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
|
|
|
|
else
|
|
|
|
pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
|
|
|
|
cpumask_pr_args(rcu_nocb_mask));
|
|
|
|
if (rcu_nocb_poll)
|
|
|
|
pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
|
|
|
|
|
|
|
|
for_each_cpu(cpu, rcu_nocb_mask) {
|
|
|
|
rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
|
if (rcu_segcblist_empty(&rdp->cblist))
|
|
|
|
rcu_segcblist_init(&rdp->cblist);
|
2024-07-03 22:56:40 +00:00
|
|
|
rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_OFFLOADED);
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
rcu_organize_nocb_kthreads();
|
|
|
|
}
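/*
 * Example (illustrative): booting with "rcu_nocbs=2-7" offloads callbacks
 * from CPUs 2-7, and on a CONFIG_NO_HZ_FULL kernel booting with
 * "nohz_full=2-7" implicitly offloads those same CPUs via the
 * tick_nohz_full_mask handling above.
 */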
|
|
|
|
|
|
|
|
/* Initialize per-rcu_data variables for no-CBs CPUs. */
|
|
|
|
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
init_swait_queue_head(&rdp->nocb_cb_wq);
|
|
|
|
init_swait_queue_head(&rdp->nocb_gp_wq);
|
|
|
|
init_swait_queue_head(&rdp->nocb_state_wq);
|
|
|
|
raw_spin_lock_init(&rdp->nocb_lock);
|
|
|
|
raw_spin_lock_init(&rdp->nocb_bypass_lock);
|
|
|
|
raw_spin_lock_init(&rdp->nocb_gp_lock);
|
|
|
|
timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
|
|
|
|
rcu_cblist_init(&rdp->nocb_bypass);
|
2022-10-16 16:22:54 +00:00
|
|
|
WRITE_ONCE(rdp->lazy_len, 0);
|
2021-12-11 17:01:39 +00:00
|
|
|
mutex_init(&rdp->nocb_gp_kthread_mutex);
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the specified CPU is a no-CBs CPU that does not already have its
|
|
|
|
* rcuo CB kthread, spawn it. Additionally, if the rcuo GP kthread
|
|
|
|
* for this CPU's group has not yet been created, spawn it as well.
|
|
|
|
*/
|
2021-11-23 00:37:08 +00:00
|
|
|
static void rcu_spawn_cpu_nocb_kthread(int cpu)
|
2021-05-19 00:09:28 +00:00
|
|
|
{
|
|
|
|
struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
|
struct rcu_data *rdp_gp;
|
|
|
|
struct task_struct *t;
|
2022-01-11 23:32:51 +00:00
|
|
|
struct sched_param sp;
|
2021-05-19 00:09:28 +00:00
|
|
|
|
2022-02-14 13:23:39 +00:00
|
|
|
if (!rcu_scheduler_fully_active || !rcu_state.nocb_is_setup)
|
2021-11-23 00:37:08 +00:00
|
|
|
return;
|
|
|
|
|
2021-11-23 00:37:06 +00:00
|
|
|
/* If there already is an rcuo kthread, then nothing to do. */
|
|
|
|
if (rdp->nocb_cb_kthread)
|
2021-05-19 00:09:28 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* If we didn't spawn the GP kthread first, reorganize! */
|
2022-01-11 23:32:51 +00:00
|
|
|
sp.sched_priority = kthread_prio;
|
2021-05-19 00:09:28 +00:00
|
|
|
rdp_gp = rdp->nocb_gp_rdp;
|
2021-12-11 17:01:39 +00:00
|
|
|
mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
|
2021-05-19 00:09:28 +00:00
|
|
|
if (!rdp_gp->nocb_gp_kthread) {
|
|
|
|
t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
|
|
|
|
"rcuog/%d", rdp_gp->cpu);
|
2021-12-11 17:01:39 +00:00
|
|
|
if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__)) {
|
|
|
|
mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
|
2024-05-30 13:45:44 +00:00
|
|
|
goto err;
|
2021-12-11 17:01:39 +00:00
|
|
|
}
|
2021-05-19 00:09:28 +00:00
|
|
|
WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
|
2022-01-11 23:32:51 +00:00
|
|
|
if (kthread_prio)
|
|
|
|
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
2021-12-11 17:01:39 +00:00
|
|
|
mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
|
2021-05-19 00:09:28 +00:00
|
|
|
|
|
|
|
/* Spawn the kthread for this CPU. */
|
2024-04-25 14:18:34 +00:00
|
|
|
t = kthread_create(rcu_nocb_cb_kthread, rdp,
|
|
|
|
"rcuo%c/%d", rcu_state.abbr, cpu);
|
2021-05-19 00:09:28 +00:00
|
|
|
if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
|
2024-05-30 13:45:44 +00:00
|
|
|
goto err;
|
2022-01-11 23:32:52 +00:00
|
|
|
|
2024-04-25 14:18:34 +00:00
|
|
|
if (rcu_rdp_is_offloaded(rdp))
|
|
|
|
wake_up_process(t);
|
|
|
|
else
|
|
|
|
kthread_park(t);
|
|
|
|
|
rcu/nocb: Add option to opt rcuo kthreads out of RT priority
This commit introduces a RCU_NOCB_CPU_CB_BOOST Kconfig option that
prevents rcuo kthreads from running at real-time priority, even in
kernels built with RCU_BOOST. This capability is important to devices
needing low-latency (as in a few milliseconds) response from expedited
RCU grace periods, but which are not running a classic real-time workload.
On such devices, permitting the rcuo kthreads to run at real-time priority
results in unacceptable latencies imposed on the application tasks,
which run as SCHED_OTHER.
See for example the following trace output:
<snip>
<...>-60 [006] d..1 2979.028717: rcu_batch_start: rcu_preempt CBs=34619 bl=270
<snip>
If that rcuop kthread were permitted to run at real-time SCHED_FIFO
priority, it would monopolize its CPU for hundreds of milliseconds
while invoking those 34619 RCU callback functions, which would cause an
unacceptably long latency spike for many application stacks on Android
platforms.
However, some existing real-time workloads require that callback
invocation run at SCHED_FIFO priority, for example, those running on
systems with heavy SCHED_OTHER background loads. (It is the real-time
system's administrator's responsibility to make sure that important
real-time tasks run at a higher priority than do RCU's kthreads.)
Therefore, this new RCU_NOCB_CPU_CB_BOOST Kconfig option defaults to
"y" on kernels built with PREEMPT_RT and defaults to "n" otherwise.
The effect is to preserve current behavior for real-time systems, but for
other systems to allow expedited RCU grace periods to run with real-time
priority while continuing to invoke RCU callbacks as SCHED_OTHER.
As you would expect, this RCU_NOCB_CPU_CB_BOOST Kconfig option has no
effect except on CPUs with offloaded RCU callbacks.
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Acked-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Reviewed-by: Neeraj Upadhyay <quic_neeraju@quicinc.com>
2022-05-11 08:57:03 +00:00
|
|
|
if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_CB_BOOST) && kthread_prio)
|
2022-01-11 23:32:52 +00:00
|
|
|
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
|
2022-05-11 08:57:03 +00:00
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
WRITE_ONCE(rdp->nocb_cb_kthread, t);
|
|
|
|
WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
|
2022-04-19 12:23:20 +00:00
|
|
|
return;
|
2024-05-30 13:45:44 +00:00
|
|
|
|
|
|
|
err:
|
|
|
|
/*
|
|
|
|
* No need to protect against concurrent rcu_barrier()
|
|
|
|
* because the number of callbacks should be 0 for a non-boot CPU,
|
|
|
|
* therefore rcu_barrier() shouldn't even try to grab the nocb_lock.
|
2024-05-30 13:45:45 +00:00
|
|
|
* But hold nocb_mutex to avoid nocb_lock imbalance from shrinker.
|
2024-05-30 13:45:44 +00:00
|
|
|
*/
|
|
|
|
WARN_ON_ONCE(system_state > SYSTEM_BOOTING && rcu_segcblist_n_cbs(&rdp->cblist));
|
2024-05-30 13:45:45 +00:00
|
|
|
mutex_lock(&rcu_state.nocb_mutex);
|
2022-04-19 12:23:20 +00:00
|
|
|
if (rcu_rdp_is_offloaded(rdp)) {
|
|
|
|
rcu_nocb_rdp_deoffload(rdp);
|
|
|
|
cpumask_clear_cpu(cpu, rcu_nocb_mask);
|
|
|
|
}
|
2024-05-30 13:45:45 +00:00
|
|
|
mutex_unlock(&rcu_state.nocb_mutex);
|
2021-05-19 00:09:28 +00:00
|
|
|
}
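/*
 * For reference: the kthreads spawned above show up as "rcuog/N" (GP
 * kthreads) and "rcuo<c>/N" (CB kthreads), where <c> is rcu_state.abbr,
 * that is, 'p' on preemptible-RCU kernels and 's' otherwise.
 */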
|
|
|
|
|
|
|
|
/* How many CB CPU IDs per GP kthread? Default of -1 for sqrt(nr_cpu_ids). */
|
|
|
|
static int rcu_nocb_gp_stride = -1;
|
|
|
|
module_param(rcu_nocb_gp_stride, int, 0444);
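/*
 * Tunable at boot time, for example via "rcutree.rcu_nocb_gp_stride=16";
 * the 0444 mode makes it read-only at runtime.
 */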
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize GP-CB relationships for all no-CBs CPUs.
|
|
|
|
*/
|
|
|
|
static void __init rcu_organize_nocb_kthreads(void)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
bool firsttime = true;
|
|
|
|
bool gotnocbs = false;
|
|
|
|
bool gotnocbscbs = true;
|
|
|
|
int ls = rcu_nocb_gp_stride;
|
|
|
|
int nl = 0; /* Next GP kthread. */
|
|
|
|
struct rcu_data *rdp;
|
|
|
|
struct rcu_data *rdp_gp = NULL; /* Suppress misguided gcc warn. */
|
|
|
|
|
|
|
|
if (!cpumask_available(rcu_nocb_mask))
|
|
|
|
return;
|
|
|
|
if (ls == -1) {
|
|
|
|
ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
|
|
|
|
rcu_nocb_gp_stride = ls;
|
|
|
|
}
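/*
 * For example, with nr_cpu_ids == 16 the default stride is
 * 16 / int_sqrt(16) == 4, so (assuming all CPUs are offloaded) CPUs 0-3
 * share the GP kthread of CPU 0, CPUs 4-7 that of CPU 4, and so on.
 */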
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Each pass through this loop sets up one rcu_data structure.
|
|
|
|
* Should the corresponding CPU come online in the future, then
|
|
|
|
* we will spawn the needed set of rcu_nocb_kthread() kthreads.
|
|
|
|
*/
|
2021-11-23 00:37:06 +00:00
|
|
|
for_each_possible_cpu(cpu) {
|
2021-05-19 00:09:28 +00:00
|
|
|
rdp = per_cpu_ptr(&rcu_data, cpu);
|
|
|
|
if (rdp->cpu >= nl) {
|
|
|
|
/* New GP kthread, set up for CBs & next GP. */
|
|
|
|
gotnocbs = true;
|
|
|
|
nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
|
|
|
|
rdp_gp = rdp;
|
2021-11-23 00:37:03 +00:00
|
|
|
INIT_LIST_HEAD(&rdp->nocb_head_rdp);
|
2021-05-19 00:09:28 +00:00
|
|
|
if (dump_tree) {
|
|
|
|
if (!firsttime)
|
|
|
|
pr_cont("%s\n", gotnocbscbs
|
|
|
|
? "" : " (self only)");
|
|
|
|
gotnocbscbs = false;
|
|
|
|
firsttime = false;
|
|
|
|
pr_alert("%s: No-CB GP kthread CPU %d:",
|
|
|
|
__func__, cpu);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* Another CB kthread, link to previous GP kthread. */
|
|
|
|
gotnocbscbs = true;
|
|
|
|
if (dump_tree)
|
|
|
|
pr_cont(" %d", cpu);
|
|
|
|
}
|
2021-11-23 00:37:03 +00:00
|
|
|
rdp->nocb_gp_rdp = rdp_gp;
|
2021-11-23 00:37:06 +00:00
|
|
|
if (cpumask_test_cpu(cpu, rcu_nocb_mask))
|
|
|
|
list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
if (gotnocbs && dump_tree)
|
|
|
|
pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Bind the current task to the offloaded CPUs. If there are no offloaded
|
|
|
|
* CPUs, leave the task unbound. Splat if the bind attempt fails.
|
|
|
|
*/
|
|
|
|
void rcu_bind_current_to_nocb(void)
|
|
|
|
{
|
2022-01-23 18:38:53 +00:00
|
|
|
if (cpumask_available(rcu_nocb_mask) && !cpumask_empty(rcu_nocb_mask))
|
2021-05-19 00:09:28 +00:00
|
|
|
WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
|
|
|
|
|
|
|
|
// The ->on_cpu field is available only in CONFIG_SMP=y, so...
|
|
|
|
#ifdef CONFIG_SMP
|
|
|
|
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
|
|
|
|
{
|
|
|
|
return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
|
|
|
|
}
|
|
|
|
#else // #ifdef CONFIG_SMP
|
|
|
|
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
|
|
|
|
{
|
|
|
|
return "";
|
|
|
|
}
|
|
|
|
#endif // #else #ifdef CONFIG_SMP
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Dump out nocb grace-period kthread state for the specified rcu_data
|
|
|
|
* structure.
|
|
|
|
*/
|
|
|
|
static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
struct rcu_node *rnp = rdp->mynode;
|
|
|
|
|
|
|
|
pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
|
|
|
|
rdp->cpu,
|
|
|
|
"kK"[!!rdp->nocb_gp_kthread],
|
|
|
|
"lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
|
|
|
|
"dD"[!!rdp->nocb_defer_wakeup],
|
|
|
|
"tT"[timer_pending(&rdp->nocb_timer)],
|
|
|
|
"sS"[!!rdp->nocb_gp_sleep],
|
|
|
|
".W"[swait_active(&rdp->nocb_gp_wq)],
|
|
|
|
".W"[swait_active(&rnp->nocb_gp_wq[0])],
|
|
|
|
".W"[swait_active(&rnp->nocb_gp_wq[1])],
|
|
|
|
".B"[!!rdp->nocb_gp_bypass],
|
|
|
|
".G"[!!rdp->nocb_gp_gp],
|
|
|
|
(long)rdp->nocb_gp_seq,
|
|
|
|
rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
|
|
|
|
rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
|
2022-06-17 14:15:19 +00:00
|
|
|
rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
|
|
|
|
show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread));
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Dump out nocb kthread state for the specified rcu_data structure. */
|
|
|
|
static void show_rcu_nocb_state(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
char bufw[20];
|
|
|
|
char bufr[20];
|
2021-11-23 00:37:03 +00:00
|
|
|
struct rcu_data *nocb_next_rdp;
|
2021-05-19 00:09:28 +00:00
|
|
|
struct rcu_segcblist *rsclp = &rdp->cblist;
|
|
|
|
bool waslocked;
|
|
|
|
bool wassleep;
|
|
|
|
|
|
|
|
if (rdp->nocb_gp_rdp == rdp)
|
|
|
|
show_rcu_nocb_gp_state(rdp);
|
|
|
|
|
2021-11-23 00:37:03 +00:00
|
|
|
nocb_next_rdp = list_next_or_null_rcu(&rdp->nocb_gp_rdp->nocb_head_rdp,
|
|
|
|
&rdp->nocb_entry_rdp,
|
|
|
|
typeof(*rdp),
|
|
|
|
nocb_entry_rdp);
|
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
|
|
|
|
sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
|
2024-04-25 14:18:35 +00:00
|
|
|
pr_info(" CB %d^%d->%d %c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
|
2021-05-19 00:09:28 +00:00
|
|
|
rdp->cpu, rdp->nocb_gp_rdp->cpu,
|
2021-11-23 00:37:03 +00:00
|
|
|
nocb_next_rdp ? nocb_next_rdp->cpu : -1,
|
2021-05-19 00:09:28 +00:00
|
|
|
"kK"[!!rdp->nocb_cb_kthread],
|
|
|
|
"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
|
|
|
|
"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
|
|
|
|
"sS"[!!rdp->nocb_cb_sleep],
|
|
|
|
".W"[swait_active(&rdp->nocb_cb_wq)],
|
|
|
|
jiffies - rdp->nocb_bypass_first,
|
|
|
|
jiffies - rdp->nocb_nobypass_last,
|
|
|
|
rdp->nocb_nobypass_count,
|
|
|
|
".D"[rcu_segcblist_ready_cbs(rsclp)],
|
|
|
|
".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
|
|
|
|
rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
|
|
|
|
".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
|
|
|
|
rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
|
|
|
|
".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
|
|
|
|
".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
|
|
|
|
rcu_segcblist_n_cbs(&rdp->cblist),
|
|
|
|
rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
|
2022-06-17 14:15:19 +00:00
|
|
|
rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
|
2021-05-19 00:09:28 +00:00
|
|
|
show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
|
|
|
|
|
|
|
|
/* It is OK for GP kthreads to have GP state. */
|
|
|
|
if (rdp->nocb_gp_rdp == rdp)
|
|
|
|
return;
|
|
|
|
|
|
|
|
waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
|
|
|
|
wassleep = swait_active(&rdp->nocb_gp_wq);
|
|
|
|
if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
|
|
|
|
return; /* Nothing untoward. */
|
|
|
|
|
|
|
|
pr_info(" nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
|
|
|
|
"lL"[waslocked],
|
|
|
|
"dD"[!!rdp->nocb_defer_wakeup],
|
|
|
|
"sS"[!!rdp->nocb_gp_sleep],
|
|
|
|
".W"[wassleep]);
|
|
|
|
}
|
|
|
|
|
|
|
|
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
|
|
|
|
|
|
|
|
/* No ->nocb_lock to acquire. */
|
|
|
|
static void rcu_nocb_lock(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
/* No ->nocb_lock to release. */
|
|
|
|
static void rcu_nocb_unlock(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
/* No ->nocb_lock to release. */
|
|
|
|
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
|
|
|
|
unsigned long flags)
|
|
|
|
{
|
|
|
|
local_irq_restore(flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Lockdep check that ->cblist may be safely accessed. */
|
|
|
|
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
lockdep_assert_irqs_disabled();
|
|
|
|
}
|
|
|
|
|
|
|
|
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
|
|
|
|
{
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void rcu_init_one_nocb(struct rcu_node *rnp)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2022-10-16 16:22:53 +00:00
|
|
|
static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
|
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-05-19 00:09:28 +00:00
|
|
|
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
|
2022-10-16 16:22:54 +00:00
|
|
|
unsigned long j, bool lazy)
|
2021-05-19 00:09:28 +00:00
|
|
|
{
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2024-01-09 22:24:01 +00:00
|
|
|
static void call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *head,
|
|
|
|
rcu_callback_t func, unsigned long flags, bool lazy)
|
2021-05-19 00:09:28 +00:00
|
|
|
{
|
2024-01-09 22:24:01 +00:00
|
|
|
WARN_ON_ONCE(1); /* Should be dead code! */
|
2021-05-19 00:09:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
|
|
|
|
unsigned long flags)
|
|
|
|
{
|
|
|
|
WARN_ON_ONCE(1); /* Should be dead code! */
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
|
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void rcu_spawn_cpu_nocb_kthread(int cpu)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
static void show_rcu_nocb_state(struct rcu_data *rdp)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
|