linux-next/include/linux/local_lock.h
Sebastian Andrzej Siewior c5bcab7558 locking/local_lock: Add local nested BH locking infrastructure.
Add local_lock_nested_bh() locking. It is based on local_lock_t and the
naming follows the preempt_disable_nested() example.

For !PREEMPT_RT + !LOCKDEP it is a per-CPU annotation for locking
assumptions based on local_bh_disable(). The macro is optimized away
during compilation.
For !PREEMPT_RT + LOCKDEP, local_lock_nested_bh() reduces to the usual
lock-acquire plus lockdep_assert_in_softirq(), ensuring that BH is
disabled.

For PREEMPT_RT local_lock_nested_bh() acquires the specified per-CPU
lock. It does not disable CPU migration because it relies on
local_bh_disable() disabling CPU migration.
With LOCKDEP it performs the usual lockdep checks as with !PREEMPT_RT.
Due to include hell the softirq check has been moved to spinlock.c.

The intention is to use this locking in places where locking of a per-CPU
variable relies on BH being disabled. Instead of treating disabled
bottom halves as a big per-CPU lock, PREEMPT_RT can use this to reduce
the locking scope to what actually needs protecting.
A side effect is that it also documents the protection scope of the
per-CPU variables.
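
A minimal usage sketch (illustrative only; the struct and variable
names below are hypothetical, not part of this patch):

	struct pcpu_stats {
		local_lock_t	lock;
		u64		packets;
	};
	static DEFINE_PER_CPU(struct pcpu_stats, pcpu_stats) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	/* The caller already runs with BH disabled, e.g. in softirq. */
	local_lock_nested_bh(&pcpu_stats.lock);
	this_cpu_inc(pcpu_stats.packets);
	local_unlock_nested_bh(&pcpu_stats.lock);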

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20240620132727.660738-3-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2024-06-24 16:41:22 -07:00

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LOCAL_LOCK_H
#define _LINUX_LOCAL_LOCK_H

#include <linux/local_lock_internal.h>

/**
 * local_lock_init - Runtime initialize a lock instance
 * @lock:	The lock variable
 */
#define local_lock_init(lock)		__local_lock_init(lock)
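
/*
 * Static initialization sketch (illustrative; 'my_lock' is a
 * hypothetical name, INIT_LOCAL_LOCK() is provided by
 * local_lock_internal.h):
 *
 *	static DEFINE_PER_CPU(local_lock_t, my_lock) = INIT_LOCAL_LOCK(my_lock);
 */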

/**
 * local_lock - Acquire a per CPU local lock
 * @lock:	The lock variable
 */
#define local_lock(lock)		__local_lock(lock)

/**
 * local_lock_irq - Acquire a per CPU local lock and disable interrupts
 * @lock:	The lock variable
 */
#define local_lock_irq(lock)		__local_lock_irq(lock)

/**
 * local_lock_irqsave - Acquire a per CPU local lock, save and disable
 *			interrupts
 * @lock:	The lock variable
 * @flags:	Storage for interrupt flags
 */
#define local_lock_irqsave(lock, flags)				\
	__local_lock_irqsave(lock, flags)

/**
 * local_unlock - Release a per CPU local lock
 * @lock:	The lock variable
 */
#define local_unlock(lock)		__local_unlock(lock)

/**
 * local_unlock_irq - Release a per CPU local lock and enable interrupts
 * @lock:	The lock variable
 */
#define local_unlock_irq(lock)		__local_unlock_irq(lock)

/**
 * local_unlock_irqrestore - Release a per CPU local lock and restore
 *			     interrupt flags
 * @lock:	The lock variable
 * @flags:	Interrupt flags to restore
 */
#define local_unlock_irqrestore(lock, flags)			\
	__local_unlock_irqrestore(lock, flags)
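
/*
 * Plain usage sketch (with the hypothetical per-CPU 'my_lock' from
 * the example above):
 *
 *	unsigned long flags;
 *
 *	local_lock_irqsave(&my_lock, flags);
 *	... access this CPU's protected data ...
 *	local_unlock_irqrestore(&my_lock, flags);
 */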

DEFINE_GUARD(local_lock, local_lock_t __percpu*,
	     local_lock(_T),
	     local_unlock(_T))
DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*,
	     local_lock_irq(_T),
	     local_unlock_irq(_T))
DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu,
		    local_lock_irqsave(_T->lock, _T->flags),
		    local_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)
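
/*
 * The guards above enable scope-based locking via linux/cleanup.h;
 * the lock is dropped automatically when the scope is left. A sketch
 * with the hypothetical 'my_lock':
 *
 *	guard(local_lock_irqsave)(&my_lock);
 */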

/**
 * local_lock_nested_bh - Acquire a per CPU local lock in a BH
 *			  disabled section
 * @_lock:	The lock variable
 */
#define local_lock_nested_bh(_lock)				\
	__local_lock_nested_bh(_lock)

/**
 * local_unlock_nested_bh - Release a per CPU local lock acquired via
 *			    local_lock_nested_bh()
 * @_lock:	The lock variable
 */
#define local_unlock_nested_bh(_lock)				\
	__local_unlock_nested_bh(_lock)

DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*,
	     local_lock_nested_bh(_T),
	     local_unlock_nested_bh(_T))
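
/*
 * Guard sketch for the nested BH variant (hypothetical 'my_lock'; BH
 * must already be disabled by the caller):
 *
 *	guard(local_lock_nested_bh)(&my_lock);
 */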
#endif