commit 16477cdfef
Merge tag 'asm-generic-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic

Pull asm-generic updates from Arnd Bergmann:
 "The asm-generic tree contains three separate changes for linux-5.19:

  - The h8300 architecture is retired after it has been effectively
    unmaintained for a number of years. This is the last architecture we
    supported that has no MMU implementation, but there are still a few
    architectures (arm, m68k, riscv, sh and xtensa) that support CPUs
    with and without an MMU.

  - A series to add a generic ticket spinlock that can be shared by most
    architectures with a working cmpxchg or ll/sc type atomic, including
    the conversion of riscv, csky and openrisc. This series is also a
    prerequisite for the loongarch64 architecture port that will come as
    a separate pull request.

  - A cleanup of some exported uapi header files to ensure they can be
    included from user space without relying on other kernel headers"

* tag 'asm-generic-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
  h8300: remove stale bindings and symlink
  sparc: add asm/stat.h to UAPI compile-test coverage
  powerpc: add asm/stat.h to UAPI compile-test coverage
  mips: add asm/stat.h to UAPI compile-test coverage
  riscv: add linux/bpf_perf_event.h to UAPI compile-test coverage
  kbuild: prevent exported headers from including <stdlib.h>, <stdbool.h>
  agpgart.h: do not include <stdlib.h> from exported header
  csky: Move to generic ticket-spinlock
  RISC-V: Move to queued RW locks
  RISC-V: Move to generic spinlocks
  openrisc: Move to ticket-spinlock
  asm-generic: qrwlock: Document the spinlock fairness requirements
  asm-generic: qspinlock: Indicate the use of mixed-size atomics
  asm-generic: ticket-lock: New generic ticket-based spinlock
  remove the h8300 architecture
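As background for the ticket-spinlock item: a ticket spinlock hands each
contender a ticket from a monotonically increasing counter, and each
contender spins until a "now serving" counter reaches its ticket, which
makes acquisition FIFO-fair. The sketch below illustrates the concept only
and is not the code merged in this series; the names ticket_lock_t,
ticket_lock() and ticket_unlock() are invented for the illustration, and
the actual generic implementation packs both counters into a single 32-bit
word so a ticket can be drawn with one atomic fetch-add.

    /* Illustrative only; assumes <linux/atomic.h> kernel atomics. */
    typedef struct {
            atomic_t next;   /* next ticket to hand out */
            atomic_t owner;  /* ticket currently being served */
    } ticket_lock_t;

    static void ticket_lock(ticket_lock_t *lock)
    {
            /* Draw a ticket; the fetch-add makes ticket numbers unique. */
            int me = atomic_fetch_add(1, &lock->next);

            /*
             * Spin until our number is served; the acquire read orders
             * our critical section after the previous owner's release.
             */
            while (atomic_read_acquire(&lock->owner) != me)
                    cpu_relax();
    }

    static void ticket_unlock(ticket_lock_t *lock)
    {
            /* Serve the next ticket, releasing our critical section. */
            atomic_set_release(&lock->owner, atomic_read(&lock->owner) + 1);
    }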
148 lines | 4.0 KiB | C
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queue read/write lock
 *
 * These use generic atomic and locking routines, but depend on a fair spinlock
 * implementation in order to be fair themselves.  The implementation in
 * asm-generic/spinlock.h meets these requirements.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QRWLOCK_H
#define __ASM_GENERIC_QRWLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#include <asm-generic/qrwlock_types.h>

/* Must be included from asm/spinlock.h after defining arch_spin_is_locked. */

/*
 * Writer states & reader shift and bias.
 */
#define _QW_WAITING     0x100           /* A writer is waiting      */
#define _QW_LOCKED      0x0ff           /* A writer holds the lock  */
#define _QW_WMASK       0x1ff           /* Writer mask              */
#define _QR_SHIFT       9               /* Reader count shift       */
#define _QR_BIAS        (1U << _QR_SHIFT)
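
/*
 * Taken together, the definitions above describe the layout of the lock
 * word (lock->cnts): bits 0-7 form the writer-locked byte, bit 8 is the
 * writer-waiting flag, and bits 9 and up count active readers, each
 * holding one _QR_BIAS reference.
 */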

/*
 * External function declarations
 */
extern void queued_read_lock_slowpath(struct qrwlock *lock);
extern void queued_write_lock_slowpath(struct qrwlock *lock);

/**
 * queued_read_trylock - try to acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_read_trylock(struct qrwlock *lock)
{
        int cnts;

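        /*
         * Check for a writer (holding or waiting) first; if there is none,
         * speculatively add a reader reference and re-check.  Should a
         * writer have slipped in between, drop the reference again and
         * fail instead of waiting.
         */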
        cnts = atomic_read(&lock->cnts);
        if (likely(!(cnts & _QW_WMASK))) {
                cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
                if (likely(!(cnts & _QW_WMASK)))
                        return 1;
                atomic_sub(_QR_BIAS, &lock->cnts);
        }
        return 0;
}

/**
 * queued_write_trylock - try to acquire write lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static inline int queued_write_trylock(struct qrwlock *lock)
{
        int cnts;

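        /*
         * A writer can only take the lock from the fully unlocked state:
         * one cmpxchg from 0 to _QW_LOCKED, with no waiting here.
         */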
        cnts = atomic_read(&lock->cnts);
        if (unlikely(cnts))
                return 0;

        return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
                                _QW_LOCKED));
}

/**
 * queued_read_lock - acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
static inline void queued_read_lock(struct qrwlock *lock)
{
        int cnts;

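        /*
         * Fast path: take a reader reference; done unless a writer holds
         * or is waiting for the lock.
         */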
        cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
        if (likely(!(cnts & _QW_WMASK)))
                return;

        /* The slowpath will decrement the reader count, if necessary. */
        queued_read_lock_slowpath(lock);
}

/**
 * queued_write_lock - acquire write lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
static inline void queued_write_lock(struct qrwlock *lock)
{
        int cnts = 0;
        /* Optimize for the unfair lock case where the fair flag is 0. */
        if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
                return;
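
        /* Contended writers queue fairly on lock->wait_lock in the slowpath. */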
        queued_write_lock_slowpath(lock);
}

/**
 * queued_read_unlock - release read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
static inline void queued_read_unlock(struct qrwlock *lock)
{
        /*
         * Atomically decrement the reader count, with release semantics
         * pairing with the acquire in the lock paths.
         */
        (void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
}

/**
 * queued_write_unlock - release write lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
static inline void queued_write_unlock(struct qrwlock *lock)
{
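        /*
         * _QW_LOCKED occupies only the lowest byte of the lock word, so a
         * plain release store to lock->wlocked is enough to drop the lock
         * without disturbing the reader count.
         */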
        smp_store_release(&lock->wlocked, 0);
}

/**
 * queued_rwlock_is_contended - check if the lock is contended
 * @lock: Pointer to queued rwlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static inline int queued_rwlock_is_contended(struct qrwlock *lock)
{
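        /* Both reader and writer slowpaths park waiters on lock->wait_lock. */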
        return arch_spin_is_locked(&lock->wait_lock);
}

/*
 * Remapping rwlock architecture specific functions to the corresponding
 * queued rwlock functions.
 */
#define arch_read_lock(l)               queued_read_lock(l)
#define arch_write_lock(l)              queued_write_lock(l)
#define arch_read_trylock(l)            queued_read_trylock(l)
#define arch_write_trylock(l)           queued_write_trylock(l)
#define arch_read_unlock(l)             queued_read_unlock(l)
#define arch_write_unlock(l)            queued_write_unlock(l)
#define arch_rwlock_is_contended(l)     queued_rwlock_is_contended(l)

#endif /* __ASM_GENERIC_QRWLOCK_H */