asm-generic: ticket-lock: Add separate ticket-lock.h

Add a separate ticket-lock header (ticket_spinlock.h) so that an architecture
can carry multiple spinlock implementations and select one at compile time or
at run time.

Reviewed-by: Leonardo Bras <leobras@redhat.com>
Suggested-by: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/linux-riscv/CAK8P3a2rnz9mQqhN6-e0CGUUv9rntRELFdxt_weiD7FxH7fkfQ@mail.gmail.com/
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Signed-off-by: Guo Ren <guoren@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Reviewed-by: Andrea Parri <parri.andrea@gmail.com>
Link: https://lore.kernel.org/r/20241103145153.105097-11-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
commit 22c33321e2 (parent cbe82e140b)
Author:    Guo Ren
Date:      2024-11-03 15:51:50 +01:00
Committer: Palmer Dabbelt
2 changed files with 104 additions and 86 deletions
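
To illustrate the intent (this sketch is not part of the patch), an
architecture's own asm/spinlock.h can now pick an implementation at build
time, for example keyed off CONFIG_QUEUED_SPINLOCKS; the asm-foo path and
header guard below are hypothetical, and the exact selection logic is up to
the architecture:

/* Illustrative only -- not part of this commit. */
#ifndef __ASM_FOO_SPINLOCK_H
#define __ASM_FOO_SPINLOCK_H

#ifdef CONFIG_QUEUED_SPINLOCKS
#include <asm/qspinlock.h>			/* queued (MCS-style) spinlocks */
#else
#include <asm-generic/ticket_spinlock.h>	/* the header added below */
#endif

#include <asm/qrwlock.h>

#endif /* __ASM_FOO_SPINLOCK_H */

A run-time selection variant (e.g. patching between implementations) is more
involved and is not shown here.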

include/asm-generic/spinlock.h

@@ -1,94 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* 'Generic' ticket-lock implementation.
*
* It relies on atomic_fetch_add() having well defined forward progress
* guarantees under contention. If your architecture cannot provide this, stick
* to a test-and-set lock.
*
* It also relies on atomic_fetch_add() being safe vs smp_store_release() on a
* sub-word of the value. This is generally true for anything LL/SC although
* you'd be hard pressed to find anything useful in architecture specifications
* about this. If your architecture cannot do this you might be better off with
* a test-and-set.
*
* It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
* uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with
* a full fence after the spin to upgrade the otherwise-RCpc
* atomic_cond_read_acquire().
*
* The implementation uses smp_cond_load_acquire() to spin, so if the
* architecture has WFE like instructions to sleep instead of poll for word
* modifications be sure to implement that (see ARM64 for example).
*
*/
#ifndef __ASM_GENERIC_SPINLOCK_H
#define __ASM_GENERIC_SPINLOCK_H
#include <linux/atomic.h>
#include <asm-generic/spinlock_types.h>
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
u32 val = atomic_fetch_add(1<<16, &lock->val);
u16 ticket = val >> 16;
if (ticket == (u16)val)
return;
/*
* atomic_cond_read_acquire() is RCpc, but rather than defining a
* custom cond_read_rcsc() here we just emit a full fence. We only
* need the prior reads before subsequent writes ordering from
* smp_mb(), but as atomic_cond_read_acquire() just emits reads and we
* have no outstanding writes due to the atomic_fetch_add() the extra
* orderings are free.
*/
atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL);
smp_mb();
}
static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
{
u32 old = atomic_read(&lock->val);
if ((old >> 16) != (old & 0xffff))
return false;
return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */
}
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
u32 val = atomic_read(&lock->val);
smp_store_release(ptr, (u16)val + 1);
}
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
u32 val = lock.val.counter;
return ((val >> 16) == (val & 0xffff));
}
static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
arch_spinlock_t val = READ_ONCE(*lock);
return !arch_spin_value_unlocked(val);
}
static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
u32 val = atomic_read(&lock->val);
return (s16)((val >> 16) - (val & 0xffff)) > 1;
}
#include <asm-generic/ticket_spinlock.h>
#include <asm/qrwlock.h>
#endif /* __ASM_GENERIC_SPINLOCK_H */

include/asm-generic/ticket_spinlock.h

@@ -0,0 +1,103 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* 'Generic' ticket-lock implementation.
*
* It relies on atomic_fetch_add() having well defined forward progress
* guarantees under contention. If your architecture cannot provide this, stick
* to a test-and-set lock.
*
* It also relies on atomic_fetch_add() being safe vs smp_store_release() on a
* sub-word of the value. This is generally true for anything LL/SC although
* you'd be hard pressed to find anything useful in architecture specifications
* about this. If your architecture cannot do this you might be better off with
* a test-and-set.
*
* It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence
* uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with
* a full fence after the spin to upgrade the otherwise-RCpc
* atomic_cond_read_acquire().
*
* The implementation uses smp_cond_load_acquire() to spin, so if the
* architecture has WFE like instructions to sleep instead of poll for word
* modifications be sure to implement that (see ARM64 for example).
*
*/
#ifndef __ASM_GENERIC_TICKET_SPINLOCK_H
#define __ASM_GENERIC_TICKET_SPINLOCK_H
#include <linux/atomic.h>
#include <asm-generic/spinlock_types.h>
static __always_inline void ticket_spin_lock(arch_spinlock_t *lock)
{
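/*
 * ->val packs two 16-bit counters: the "next" ticket in the high half
 * and the "owner" (now-serving) ticket in the low half. Taking the
 * lock atomically bumps "next"; the pre-increment high half is our
 * ticket, and the lock is ours once "owner" reaches it.
 */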
u32 val = atomic_fetch_add(1<<16, &lock->val);
u16 ticket = val >> 16;
if (ticket == (u16)val)
return;
/*
* atomic_cond_read_acquire() is RCpc, but rather than defining a
* custom cond_read_rcsc() here we just emit a full fence. We only
* need the prior reads before subsequent writes ordering from
* smp_mb(), but as atomic_cond_read_acquire() just emits reads and we
* have no outstanding writes due to the atomic_fetch_add() the extra
* orderings are free.
*/
atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL);
smp_mb();
}
static __always_inline bool ticket_spin_trylock(arch_spinlock_t *lock)
{
u32 old = atomic_read(&lock->val);
if ((old >> 16) != (old & 0xffff))
return false;
return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */
}
static __always_inline void ticket_spin_unlock(arch_spinlock_t *lock)
{
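/*
 * Only the "owner" half advances here. It lives in the low 16 bits of
 * ->val, which is the first u16 on little-endian and the second u16 on
 * big-endian, hence the IS_ENABLED() offset.
 */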
u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
u32 val = atomic_read(&lock->val);
smp_store_release(ptr, (u16)val + 1);
}
static __always_inline int ticket_spin_value_unlocked(arch_spinlock_t lock)
{
u32 val = lock.val.counter;
return ((val >> 16) == (val & 0xffff));
}
static __always_inline int ticket_spin_is_locked(arch_spinlock_t *lock)
{
arch_spinlock_t val = READ_ONCE(*lock);
return !ticket_spin_value_unlocked(val);
}
static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
{
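/*
 * "next" minus "owner" is 1 while the lock is merely held; anything
 * larger means at least one other CPU is queued waiting its turn.
 */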
u32 val = atomic_read(&lock->val);
return (s16)((val >> 16) - (val & 0xffff)) > 1;
}
/*
* Remapping spinlock architecture specific functions to the corresponding
* ticket spinlock functions.
*/
#define arch_spin_is_locked(l) ticket_spin_is_locked(l)
#define arch_spin_is_contended(l) ticket_spin_is_contended(l)
#define arch_spin_value_unlocked(l) ticket_spin_value_unlocked(l)
#define arch_spin_lock(l) ticket_spin_lock(l)
#define arch_spin_trylock(l) ticket_spin_trylock(l)
#define arch_spin_unlock(l) ticket_spin_unlock(l)
#endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */
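
For reference outside the kernel tree, below is a self-contained userspace
sketch of the same ticket-lock idea using C11 atomics and pthreads. Unlike
the kernel code it keeps the "next" and "owner" counters as two separate
16-bit atomics rather than packing them into one 32-bit word (the kernel
packs them so that trylock can cmpxchg the whole word), and it only provides
plain acquire/release ordering rather than the RCsc hot path described in the
comment above; all names are illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ticket_lock {
	_Atomic uint16_t next;	/* next ticket to hand out */
	_Atomic uint16_t owner;	/* ticket currently being served */
};

static void ticket_lock_acquire(struct ticket_lock *lock)
{
	/* Take a ticket; the pre-increment value is ours to wait for. */
	uint16_t ticket = atomic_fetch_add_explicit(&lock->next, 1,
						    memory_order_relaxed);

	/* Spin until the owner counter reaches our ticket. */
	while (atomic_load_explicit(&lock->owner,
				    memory_order_acquire) != ticket)
		;
}

static void ticket_lock_release(struct ticket_lock *lock)
{
	/* Only the owner counter advances; "next" is left alone. */
	uint16_t owner = atomic_load_explicit(&lock->owner,
					      memory_order_relaxed);

	atomic_store_explicit(&lock->owner, owner + 1,
			      memory_order_release);
}

static struct ticket_lock lock;
static long counter;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		ticket_lock_acquire(&lock);
		counter++;		/* protected by the lock */
		ticket_lock_release(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t threads[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&threads[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(threads[i], NULL);

	/* Expect 4 * 100000 if the lock provides mutual exclusion. */
	printf("counter = %ld\n", counter);
	return 0;
}

Build with e.g. "cc -std=c11 -pthread ticket.c" and run; the counter should
come out exact because each increment happens inside the critical section.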