mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-01-06 05:13:18 +00:00
preempt: Provide preempt_[dis|en]able_nested()
On PREEMPT_RT enabled kernels, spinlocks and rwlocks are neither disabling preemption nor interrupts. Though there are a few places which depend on the implicit preemption/interrupt disable of those locks, e.g. seqcount write sections, per CPU statistics updates etc. To avoid sprinkling CONFIG_PREEMPT_RT conditionals all over the place, add preempt_disable_nested() and preempt_enable_nested() which should be descriptive enough. Add a lockdep assertion for the !PREEMPT_RT case to catch callers which do not have preemption disabled. Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lore.kernel.org/r/20220825164131.402717-2-bigeasy@linutronix.de
This commit is contained in:
parent
521a547ced
commit
555bb4ccd1
@ -421,4 +421,46 @@ static inline void migrate_enable(void) { }
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
/**
 * preempt_disable_nested - Disable preemption inside a normally preempt disabled section
 *
 * Use for code which requires preemption protection inside a critical
 * section which has preemption disabled implicitly on non-PREEMPT_RT
 * enabled kernels, by e.g.:
 *  - holding a spinlock/rwlock
 *  - soft interrupt context
 *  - regular interrupt handlers
 *
 * On PREEMPT_RT enabled kernels spinlock/rwlock held sections, soft
 * interrupt context and regular interrupt handlers are preemptible and
 * only prevent migration. preempt_disable_nested() ensures that preemption
 * is disabled for cases which require CPU local serialization even on
 * PREEMPT_RT. For non-PREEMPT_RT kernels this is a NOP.
 *
 * The use cases are code sequences which are not serialized by a
 * particular lock instance, e.g.:
 *  - seqcount write side critical sections where the seqcount is not
 *    associated to a particular lock and therefore the automatic
 *    protection mechanism does not work. This prevents a live lock
 *    against a preempting high priority reader.
 *  - RMW per CPU variable updates like vmstat.
 */
/* Macro to avoid header recursion hell vs. lockdep */
#define preempt_disable_nested()				\
do {								\
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))			\
		lockdep_assert_preemption_disabled();		\
	else							\
		preempt_disable();				\
} while (0)
|
||||
|
||||
/**
|
||||
* preempt_enable_nested - Undo the effect of preempt_disable_nested()
|
||||
*/
|
||||
static __always_inline void preempt_enable_nested(void)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_PREEMPT_RT))
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
#endif /* __LINUX_PREEMPT_H */
|
||||
|
Loading…
Reference in New Issue
Block a user