Merge branch 'locking/core' into tip/master

# New commits in locking/core:
    cb4ccc70344c ("MAINTAINERS: Add static_call_inline.c to STATIC BRANCH/CALL")
    a937f384c9da ("cleanup, tags: Create tags for the cleanup primitives")
    abfdccd6af2b ("sched/wake_q: Add helper to call wake_up_q after unlock with preemption disabled")
    fbd7a5a0359b ("rust: sync: Add lock::Backend::assert_is_held()")
    eb5ccb038284 ("rust: sync: Add SpinLockGuard type alias")
    37624dde4768 ("rust: sync: Add MutexGuard type alias")
    daa03fe50ec3 ("rust: sync: Make Guard::new() public")
    15abc88057ee ("rust: sync: Add Lock::from_raw() for Lock<(), B>")
    9793c9bb91f1 ("locking: MAINTAINERS: Start watching Rust locking primitives")
    343060092585 ("lockdep: Move lockdep_assert_locked() under #ifdef CONFIG_PROVE_LOCKING")
    8148fa2e022b ("lockdep: Mark chain_hlock_class_idx() with __maybe_unused")
    bd7b5ae26618 ("lockdep: Document MAX_LOCKDEP_CHAIN_HLOCKS calculation")
    88a79e88a97c ("lockdep: Clarify size for LOCKDEP_*_BITS configs")
    e638072e6172 ("lockdep: Fix upper limit for LOCKDEP_*_BITS configs")
    0d3547df6934 ("locking/ww_mutex/test: Use swap() macro")
    63a48181fbcd ("smp/scf: Evaluate local cond_func() before IPI side-effects")
    d387ceb17149 ("locking/lockdep: Enforce PROVE_RAW_LOCK_NESTING only if ARCH_SUPPORTS_RT")

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Committed by Ingo Molnar on 2025-01-13 18:13:42 +01:00
commit 90df9792d9
17 changed files with 152 additions and 69 deletions


@@ -13429,8 +13429,8 @@ LOCKING PRIMITIVES
 M: Peter Zijlstra <peterz@infradead.org>
 M: Ingo Molnar <mingo@redhat.com>
 M: Will Deacon <will@kernel.org>
+M: Boqun Feng <boqun.feng@gmail.com> (LOCKDEP & RUST)
 R: Waiman Long <longman@redhat.com>
-R: Boqun Feng <boqun.feng@gmail.com> (LOCKDEP)
 L: linux-kernel@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
@@ -13444,6 +13444,11 @@ F: include/linux/seqlock.h
 F: include/linux/spinlock*.h
 F: kernel/locking/
 F: lib/locking*.[ch]
+F: rust/helpers/mutex.c
+F: rust/helpers/spinlock.c
+F: rust/kernel/sync/lock.rs
+F: rust/kernel/sync/lock/
+F: rust/kernel/sync/locked_by.rs
 X: kernel/locking/locktorture.c

 LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
@@ -22450,7 +22455,7 @@ F: arch/*/kernel/static_call.c
 F: include/linux/jump_label*.h
 F: include/linux/static_call*.h
 F: kernel/jump_label.c
-F: kernel/static_call.c
+F: kernel/static_call*.c

 STI AUDIO (ASoC) DRIVERS
 M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>


@@ -63,4 +63,38 @@ extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
 extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
 extern void wake_up_q(struct wake_q_head *head);
 
+/* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */
+static inline
+void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+	guard(preempt)();
+	raw_spin_unlock(lock);
+	if (wake_q) {
+		wake_up_q(wake_q);
+		wake_q_init(wake_q);
+	}
+}
+
+static inline
+void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+	guard(preempt)();
+	raw_spin_unlock_irq(lock);
+	if (wake_q) {
+		wake_up_q(wake_q);
+		wake_q_init(wake_q);
+	}
+}
+
+static inline
+void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags,
+				     struct wake_q_head *wake_q)
+{
+	guard(preempt)();
+	raw_spin_unlock_irqrestore(lock, flags);
+	if (wake_q) {
+		wake_up_q(wake_q);
+		wake_q_init(wake_q);
+	}
+}
+
 #endif /* _LINUX_SCHED_WAKE_Q_H */
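The three helpers above bundle the unlock + wake_up_q() + preemption-disable sequence that the call sites converted further down currently open-code. A minimal usage sketch (illustrative only, not part of this series; the function, lock and task names are invented):

static void example_release_and_wake(raw_spinlock_t *lock, struct task_struct *task)
{
	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;

	raw_spin_lock_irqsave(lock, flags);
	/* queue the wakeup while still holding the lock */
	wake_q_add(&wake_q, task);
	/*
	 * Drop the lock and issue the queued wakeups with preemption
	 * disabled, replacing the old open-coded sequence:
	 *   preempt_disable();
	 *   raw_spin_unlock_irqrestore(lock, flags);
	 *   wake_up_q(&wake_q);
	 *   preempt_enable();
	 */
	raw_spin_unlock_irqrestore_wake(lock, flags, &wake_q);
}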


@@ -1020,10 +1020,7 @@ retry_private:
 	 * it sees the futex_q::pi_state.
 	 */
 	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current, &wake_q);
-	preempt_disable();
-	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irq_wake(&q.pi_state->pi_mutex.wait_lock, &wake_q);
 
 	if (ret) {
 		if (ret == 1)


@@ -157,10 +157,12 @@ static inline void lockdep_unlock(void)
 	__this_cpu_dec(lockdep_recursion);
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 static inline bool lockdep_assert_locked(void)
 {
 	return DEBUG_LOCKS_WARN_ON(__owner != current);
 }
+#endif
 
 static struct task_struct *lockdep_selftest_task_struct;
@@ -430,7 +432,7 @@ static inline u16 hlock_id(struct held_lock *hlock)
 	return (hlock->class_idx | (hlock->read << MAX_LOCKDEP_KEYS_BITS));
 }
 
-static inline unsigned int chain_hlock_class_idx(u16 hlock_id)
+static inline __maybe_unused unsigned int chain_hlock_class_idx(u16 hlock_id)
 {
 	return hlock_id & (MAX_LOCKDEP_KEYS - 1);
 }


@@ -119,7 +119,8 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
 #define MAX_LOCKDEP_CHAINS		(1UL << MAX_LOCKDEP_CHAINS_BITS)
 
-#define MAX_LOCKDEP_CHAIN_HLOCKS	(MAX_LOCKDEP_CHAINS*5)
+#define AVG_LOCKDEP_CHAIN_DEPTH		5
+#define MAX_LOCKDEP_CHAIN_HLOCKS	(MAX_LOCKDEP_CHAINS * AVG_LOCKDEP_CHAIN_DEPTH)
 
 extern struct lock_chain lock_chains[];
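Worked example of the newly named constant (illustrative arithmetic only, using the CONFIG_LOCKDEP_CHAINS_BITS=16 default visible in the Kconfig hunk further below):

/*
 * MAX_LOCKDEP_CHAINS       = 1UL << 16 = 65536 lock chains
 * MAX_LOCKDEP_CHAIN_HLOCKS = 65536 * AVG_LOCKDEP_CHAIN_DEPTH (5) = 327680 entries
 *
 * i.e. the held-lock table budgets an average depth of 5 locks per chain,
 * which is what the bare "*5" encoded before it was given a name.
 */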


@@ -657,10 +657,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 			goto err;
 		}
 
-		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-		/* Make sure we do wakeups before calling schedule */
-		wake_up_q(&wake_q);
-		wake_q_init(&wake_q);
+		raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 
 		schedule_preempt_disabled();
@@ -710,8 +707,7 @@ skip_wait:
 	if (ww_ctx)
 		ww_mutex_lock_acquired(ww, ww_ctx);
 
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	preempt_enable();
 	return 0;
@@ -720,10 +716,9 @@ err:
 	__mutex_remove_waiter(lock, &waiter);
 err_early_kill:
 	trace_contention_end(lock, ret);
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, ip);
-	wake_up_q(&wake_q);
 	preempt_enable();
 	return ret;
 }
@@ -935,10 +930,7 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
 	if (owner & MUTEX_FLAG_HANDOFF)
 		__mutex_handoff(lock, next);
 
-	preempt_disable();
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 }
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC


@@ -1292,13 +1292,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 	 */
 	get_task_struct(owner);
 
-	preempt_disable();
-	raw_spin_unlock_irq(&lock->wait_lock);
-	/* wake up any tasks on the wake_q before calling rt_mutex_adjust_prio_chain */
-	wake_up_q(wake_q);
-	wake_q_init(wake_q);
-	preempt_enable();
+	raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
 	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
 					 next_lock, waiter, task);
@@ -1642,13 +1636,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
 			owner = rt_mutex_owner(lock);
 		else
 			owner = NULL;
-		preempt_disable();
-		raw_spin_unlock_irq(&lock->wait_lock);
-		if (wake_q) {
-			wake_up_q(wake_q);
-			wake_q_init(wake_q);
-		}
-		preempt_enable();
+		raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
 		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
 			rt_mutex_schedule();
@@ -1799,10 +1787,7 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 */
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state, &wake_q);
-	preempt_disable();
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 	rt_mutex_post_schedule();
 
 	return ret;
@@ -1860,11 +1845,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 			owner = rt_mutex_owner(lock);
 		else
 			owner = NULL;
-		preempt_disable();
-		raw_spin_unlock_irq(&lock->wait_lock);
-		wake_up_q(wake_q);
-		wake_q_init(wake_q);
-		preempt_enable();
+		raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
 		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
 			schedule_rtlock();
@@ -1893,10 +1874,7 @@ static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
 	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	rtlock_slowlock_locked(lock, &wake_q);
-	preempt_disable();
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-	wake_up_q(&wake_q);
-	preempt_enable();
+	raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q);
 }
 
 #endif /* RT_MUTEX_BUILD_SPINLOCKS */


@@ -404,7 +404,7 @@ static inline u32 prandom_u32_below(u32 ceil)
 static int *get_random_order(int count)
 {
 	int *order;
-	int n, r, tmp;
+	int n, r;
 
 	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
 	if (!order)
@@ -415,11 +415,8 @@ static int *get_random_order(int count)
 
 	for (n = count - 1; n > 1; n--) {
 		r = prandom_u32_below(n + 1);
-		if (r != n) {
-			tmp = order[n];
-			order[n] = order[r];
-			order[r] = tmp;
-		}
+		if (r != n)
+			swap(order[n], order[r]);
 	}
 
 	return order;
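For reference, the swap() macro used above comes from the kernel's minmax/kernel headers and is roughly the following (a sketch, not a verbatim quote of the header):

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

so the removed three-statement temporary dance is preserved, just without a local tmp variable in the caller.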


@@ -815,7 +815,8 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 	WARN_ON_ONCE(!in_task());
 
 	/* Check if we need local execution. */
-	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
+	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask) &&
+	    (!cond_func || cond_func(this_cpu, info)))
 		run_local = true;
 
 	/* Check if we need remote execution, i.e., any CPU excluding this one. */
@@ -868,7 +869,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 			send_call_function_ipi_mask(cfd->cpumask_ipi);
 	}
 
-	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
+	if (run_local) {
 		unsigned long flags;
 
 		local_irq_save(flags);
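The functional change above is that the local CPU's cond_func() is now evaluated before any IPIs are sent, so its decision can no longer be influenced by side effects of the remote func() invocations. A hedged sketch of an affected caller shape (all example_* names are invented; only on_each_cpu_cond_mask() is the real API):

static DEFINE_PER_CPU(bool, example_needs_work);

static bool example_cond(int cpu, void *info)
{
	/* may observe state that example_func() running remotely would change */
	return per_cpu(example_needs_work, cpu);
}

static void example_func(void *info)
{
	this_cpu_write(example_needs_work, false);
	/* ... do the actual work ... */
}

static void example_kick(const struct cpumask *mask)
{
	on_each_cpu_cond_mask(example_cond, example_func, NULL, true, mask);
}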


@@ -1397,9 +1397,9 @@ config PROVE_LOCKING
 	 For more details, see Documentation/locking/lockdep-design.rst.
 
 config PROVE_RAW_LOCK_NESTING
-	bool
+	bool "Enable raw_spinlock - spinlock nesting checks" if !ARCH_SUPPORTS_RT
 	depends on PROVE_LOCKING
-	default y
+	default y if ARCH_SUPPORTS_RT
 	help
 	 Enable the raw_spinlock vs. spinlock nesting checks which ensure
 	 that the lock nesting rules for PREEMPT_RT enabled kernels are
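With this change the check stays forced on for architectures that support RT (no prompt, default y) and becomes an opt-in question elsewhere. The nesting it guards against, as a minimal illustrative sketch (lock names invented): on PREEMPT_RT a spinlock_t becomes a sleeping lock, so acquiring one inside a raw_spinlock_t critical section is invalid:

static DEFINE_RAW_SPINLOCK(example_raw_lock);
static DEFINE_SPINLOCK(example_lock);

static void example_bad_nesting(void)
{
	raw_spin_lock(&example_raw_lock);
	spin_lock(&example_lock);	/* PROVE_RAW_LOCK_NESTING warns here */
	spin_unlock(&example_lock);
	raw_spin_unlock(&example_raw_lock);
}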
@@ -1502,15 +1502,15 @@ config LOCKDEP_SMALL
 	bool
 
 config LOCKDEP_BITS
-	int "Bitsize for MAX_LOCKDEP_ENTRIES"
+	int "Size for MAX_LOCKDEP_ENTRIES (as Nth power of 2)"
 	depends on LOCKDEP && !LOCKDEP_SMALL
-	range 10 30
+	range 10 24
 	default 15
 	help
 	 Try increasing this value if you hit "BUG: MAX_LOCKDEP_ENTRIES too low!" message.
 
 config LOCKDEP_CHAINS_BITS
-	int "Bitsize for MAX_LOCKDEP_CHAINS"
+	int "Size for MAX_LOCKDEP_CHAINS (as Nth power of 2)"
 	depends on LOCKDEP && !LOCKDEP_SMALL
 	range 10 21
 	default 16
@@ -1518,25 +1518,25 @@ config LOCKDEP_CHAINS_BITS
 	 Try increasing this value if you hit "BUG: MAX_LOCKDEP_CHAINS too low!" message.
 
 config LOCKDEP_STACK_TRACE_BITS
-	int "Bitsize for MAX_STACK_TRACE_ENTRIES"
+	int "Size for MAX_STACK_TRACE_ENTRIES (as Nth power of 2)"
 	depends on LOCKDEP && !LOCKDEP_SMALL
-	range 10 30
+	range 10 26
 	default 19
 	help
 	 Try increasing this value if you hit "BUG: MAX_STACK_TRACE_ENTRIES too low!" message.
 
 config LOCKDEP_STACK_TRACE_HASH_BITS
-	int "Bitsize for STACK_TRACE_HASH_SIZE"
+	int "Size for STACK_TRACE_HASH_SIZE (as Nth power of 2)"
 	depends on LOCKDEP && !LOCKDEP_SMALL
-	range 10 30
+	range 10 26
 	default 14
 	help
 	 Try increasing this value if you need large STACK_TRACE_HASH_SIZE.
 
 config LOCKDEP_CIRCULAR_QUEUE_BITS
-	int "Bitsize for elements in circular_queue struct"
+	int "Size for elements in circular_queue struct (as Nth power of 2)"
 	depends on LOCKDEP
-	range 10 30
+	range 10 26
 	default 12
 	help
	 Try increasing this value if you hit "lockdep bfs error:-1" warning due to __cq_enqueue() failure.


@@ -12,3 +12,8 @@ void rust_helper___mutex_init(struct mutex *mutex, const char *name,
 {
 	__mutex_init(mutex, name, key);
 }
+
+void rust_helper_mutex_assert_is_held(struct mutex *mutex)
+{
+	lockdep_assert_held(mutex);
+}
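rust_helper_mutex_assert_is_held() simply forwards to lockdep_assert_held(), which the Rust MutexBackend further below calls from Guard::new(). For reference, the same C-side primitive as used by an ordinary kernel C caller (a minimal sketch; the names are invented):

static DEFINE_MUTEX(example_lock);
static int example_value;

static void example_update(int v)
{
	/* warns (with CONFIG_LOCKDEP=y) if the caller does not hold example_lock */
	lockdep_assert_held(&example_lock);
	example_value = v;
}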


@@ -30,3 +30,8 @@ int rust_helper_spin_trylock(spinlock_t *lock)
 {
 	return spin_trylock(lock);
 }
+
+void rust_helper_spin_assert_is_held(spinlock_t *lock)
+{
+	lockdep_assert_held(lock);
+}


@@ -16,8 +16,8 @@ pub mod poll;
 pub use arc::{Arc, ArcBorrow, UniqueArc};
 pub use condvar::{new_condvar, CondVar, CondVarTimeoutResult};
 pub use lock::global::{global_lock, GlobalGuard, GlobalLock, GlobalLockBackend, GlobalLockedBy};
-pub use lock::mutex::{new_mutex, Mutex};
-pub use lock::spinlock::{new_spinlock, SpinLock};
+pub use lock::mutex::{new_mutex, Mutex, MutexGuard};
+pub use lock::spinlock::{new_spinlock, SpinLock, SpinLockGuard};
 pub use locked_by::LockedBy;
 
 /// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.


@@ -90,12 +90,20 @@ pub unsafe trait Backend {
         // SAFETY: The safety requirements ensure that the lock is initialised.
         *guard_state = unsafe { Self::lock(ptr) };
     }
+
+    /// Asserts that the lock is held using lockdep.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that [`Backend::init`] has been previously called.
+    unsafe fn assert_is_held(ptr: *mut Self::State);
 }
 
 /// A mutual exclusion primitive.
 ///
 /// Exposes one of the kernel locking primitives. Which one is exposed depends on the lock
 /// [`Backend`] specified as the generic parameter `B`.
+#[repr(C)]
 #[pin_data]
 pub struct Lock<T: ?Sized, B: Backend> {
     /// The kernel lock object.
@@ -134,6 +142,28 @@ impl<T, B: Backend> Lock<T, B> {
     }
 }
 
+impl<B: Backend> Lock<(), B> {
+    /// Constructs a [`Lock`] from a raw pointer.
+    ///
+    /// This can be useful for interacting with a lock which was initialised outside of Rust.
+    ///
+    /// # Safety
+    ///
+    /// The caller promises that `ptr` points to a valid initialised instance of [`State`] during
+    /// the whole lifetime of `'a`.
+    ///
+    /// [`State`]: Backend::State
+    pub unsafe fn from_raw<'a>(ptr: *mut B::State) -> &'a Self {
+        // SAFETY:
+        // - By the safety contract `ptr` must point to a valid initialised instance of `B::State`
+        // - Since the lock data type is `()` which is a ZST, `state` is the only non-ZST member of
+        //   the struct
+        // - Combined with `#[repr(C)]`, this guarantees `Self` has an equivalent data layout to
+        //   `B::State`.
+        unsafe { &*ptr.cast() }
+    }
+}
+
 impl<T: ?Sized, B: Backend> Lock<T, B> {
     /// Acquires the lock and gives the caller access to the data protected by it.
     pub fn lock(&self) -> Guard<'_, T, B> {
@@ -211,7 +241,10 @@ impl<'a, T: ?Sized, B: Backend> Guard<'a, T, B> {
     /// # Safety
     ///
     /// The caller must ensure that it owns the lock.
-    pub(crate) unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+    pub unsafe fn new(lock: &'a Lock<T, B>, state: B::GuardState) -> Self {
+        // SAFETY: The caller can only hold the lock if `Backend::init` has already been called.
+        unsafe { B::assert_is_held(lock.state.get()) };
+
         Self {
             lock,
             state,


@@ -86,6 +86,14 @@ pub use new_mutex;
 /// [`struct mutex`]: srctree/include/linux/mutex.h
 pub type Mutex<T> = super::Lock<T, MutexBackend>;
 
+/// A [`Guard`] acquired from locking a [`Mutex`].
+///
+/// This is simply a type alias for a [`Guard`] returned from locking a [`Mutex`]. It will unlock
+/// the [`Mutex`] upon being dropped.
+///
+/// [`Guard`]: super::Guard
+pub type MutexGuard<'a, T> = super::Guard<'a, T, MutexBackend>;
+
 /// A kernel `struct mutex` lock backend.
 pub struct MutexBackend;
 
@@ -126,4 +134,9 @@ unsafe impl super::Backend for MutexBackend {
             None
         }
     }
+
+    unsafe fn assert_is_held(ptr: *mut Self::State) {
+        // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+        unsafe { bindings::mutex_assert_is_held(ptr) }
+    }
 }


@@ -87,6 +87,14 @@ pub type SpinLock<T> = super::Lock<T, SpinLockBackend>;
 /// A kernel `spinlock_t` lock backend.
 pub struct SpinLockBackend;
 
+/// A [`Guard`] acquired from locking a [`SpinLock`].
+///
+/// This is simply a type alias for a [`Guard`] returned from locking a [`SpinLock`]. It will unlock
+/// the [`SpinLock`] upon being dropped.
+///
+/// [`Guard`]: super::Guard
+pub type SpinLockGuard<'a, T> = super::Guard<'a, T, SpinLockBackend>;
+
 // SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion. `relock` uses the
 // default implementation that always calls the same locking method.
 unsafe impl super::Backend for SpinLockBackend {
@@ -125,4 +133,9 @@ unsafe impl super::Backend for SpinLockBackend {
             None
         }
     }
+
+    unsafe fn assert_is_held(ptr: *mut Self::State) {
+        // SAFETY: The `ptr` pointer is guaranteed to be valid and initialized before use.
+        unsafe { bindings::spin_assert_is_held(ptr) }
+    }
 }


@@ -212,6 +212,13 @@ regex_c=(
 	'/^SEQCOUNT_LOCKTYPE(\([^,]*\),[[:space:]]*\([^,]*\),[^)]*)/seqcount_\2_init/'
 	'/^\<DECLARE_IDTENTRY[[:alnum:]_]*([^,)]*,[[:space:]]*\([[:alnum:]_]\+\)/\1/'
 	'/^\<DEFINE_IDTENTRY[[:alnum:]_]*([[:space:]]*\([[:alnum:]_]\+\)/\1/'
+	'/^\<DEFINE_FREE(\([[:alnum:]_]\+\)/cleanup_\1/'
+	'/^\<DEFINE_CLASS(\([[:alnum:]_]\+\)/class_\1/'
+	'/^\<EXTEND_CLASS(\([[:alnum:]_]\+\),[[:space:]]*\([[:alnum:]_]\+\)/class_\1\2/'
+	'/^\<DEFINE_GUARD(\([[:alnum:]_]\+\)/class_\1/'
+	'/^\<DEFINE_GUARD_COND(\([[:alnum:]_]\+\),[[:space:]]*\([[:alnum:]_]\+\)/class_\1\2/'
+	'/^\<DEFINE_LOCK_GUARD_[[:digit:]](\([[:alnum:]_]\+\)/class_\1/'
+	'/^\<DEFINE_LOCK_GUARD_[[:digit:]]_COND(\([[:alnum:]_]\+\),[[:space:]]*\([[:alnum:]_]\+\)/class_\1\2/'
 )
 
 regex_kconfig=(
 	'/^[[:blank:]]*\(menu\|\)config[[:blank:]]\+\([[:alnum:]_]\+\)/\2/'
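For illustration, the kind of declarations the new patterns index and the tag names the regexes derive from them (the declarations below are sketches in the style of include/linux/cleanup.h users, not quotes of specific in-tree lines):

/* DEFINE_FREE(kfree, void *, kfree(_T))                          -> tag: cleanup_kfree  */
/* DEFINE_GUARD(mutex, struct mutex *,
 *              mutex_lock(_T), mutex_unlock(_T))                 -> tag: class_mutex    */
/* DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
 *              spin_lock(_T->lock), spin_unlock(_T->lock))       -> tag: class_spinlock */

With these entries, `vim -t class_mutex` (or the ctags/etags equivalent) lands on the guard definition instead of finding nothing.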