mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-12-28 16:56:26 +00:00)
commit 41b8687191

    Merge branch 'locking/atomics' into locking/core, to pick up WIP commits

    Signed-off-by: Ingo Molnar <mingo@kernel.org>
 Kbuild | 18 ++++++++++++++++--
@@ -6,7 +6,8 @@
 # 2) Generate timeconst.h
 # 3) Generate asm-offsets.h (may need bounds.h and timeconst.h)
 # 4) Check for missing system calls
-# 5) Generate constants.py (may need bounds.h)
+# 5) check atomics headers are up-to-date
+# 6) Generate constants.py (may need bounds.h)
 
 #####
 # 1) Generate bounds.h
@@ -59,7 +60,20 @@ missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
 	$(call cmd,syscalls)
 
 #####
-# 5) Generate constants for Python GDB integration
+# 5) Check atomic headers are up-to-date
 #
 
+always += old-atomics
+targets += old-atomics
+
+quiet_cmd_atomics = CALL    $<
+      cmd_atomics = $(CONFIG_SHELL) $<
+
+old-atomics: scripts/atomic/check-atomics.sh FORCE
+	$(call cmd,atomics)
+
+#####
+# 6) Generate constants for Python GDB integration
+#
+
 extra-$(CONFIG_GDB_SCRIPTS) += build_constants_py
MAINTAINERS

@@ -2609,6 +2609,7 @@ L:	linux-kernel@vger.kernel.org
 S:	Maintained
 F:	arch/*/include/asm/atomic*.h
 F:	include/*/atomic*.h
+F:	scripts/atomic/
 
 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
 M:	Bradley Grove <linuxdrivers@attotech.com>
arch/arm64/include/asm/atomic.h

@@ -42,124 +42,131 @@
 
 #define ATOMIC_INIT(i)	{ (i) }
 
-#define atomic_read(v)			READ_ONCE((v)->counter)
-#define atomic_set(v, i)		WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v)		READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i)		WRITE_ONCE(((v)->counter), (i))
 
-#define atomic_add_return_relaxed	atomic_add_return_relaxed
-#define atomic_add_return_acquire	atomic_add_return_acquire
-#define atomic_add_return_release	atomic_add_return_release
-#define atomic_add_return		atomic_add_return
+#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
+#define arch_atomic_add_return_acquire	arch_atomic_add_return_acquire
+#define arch_atomic_add_return_release	arch_atomic_add_return_release
+#define arch_atomic_add_return		arch_atomic_add_return
 
-#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
-#define atomic_sub_return_acquire	atomic_sub_return_acquire
-#define atomic_sub_return_release	atomic_sub_return_release
-#define atomic_sub_return		atomic_sub_return
+#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
+#define arch_atomic_sub_return_acquire	arch_atomic_sub_return_acquire
+#define arch_atomic_sub_return_release	arch_atomic_sub_return_release
+#define arch_atomic_sub_return		arch_atomic_sub_return
 
-#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
-#define atomic_fetch_add_acquire	atomic_fetch_add_acquire
-#define atomic_fetch_add_release	atomic_fetch_add_release
-#define atomic_fetch_add		atomic_fetch_add
+#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_add_acquire	arch_atomic_fetch_add_acquire
+#define arch_atomic_fetch_add_release	arch_atomic_fetch_add_release
+#define arch_atomic_fetch_add		arch_atomic_fetch_add
 
-#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
-#define atomic_fetch_sub_acquire	atomic_fetch_sub_acquire
-#define atomic_fetch_sub_release	atomic_fetch_sub_release
-#define atomic_fetch_sub		atomic_fetch_sub
+#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_sub_acquire	arch_atomic_fetch_sub_acquire
+#define arch_atomic_fetch_sub_release	arch_atomic_fetch_sub_release
+#define arch_atomic_fetch_sub		arch_atomic_fetch_sub
 
-#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
-#define atomic_fetch_and_acquire	atomic_fetch_and_acquire
-#define atomic_fetch_and_release	atomic_fetch_and_release
-#define atomic_fetch_and		atomic_fetch_and
+#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_and_acquire	arch_atomic_fetch_and_acquire
+#define arch_atomic_fetch_and_release	arch_atomic_fetch_and_release
+#define arch_atomic_fetch_and		arch_atomic_fetch_and
 
-#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_acquire	atomic_fetch_andnot_acquire
-#define atomic_fetch_andnot_release	atomic_fetch_andnot_release
-#define atomic_fetch_andnot		atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_andnot_acquire	arch_atomic_fetch_andnot_acquire
+#define arch_atomic_fetch_andnot_release	arch_atomic_fetch_andnot_release
+#define arch_atomic_fetch_andnot	arch_atomic_fetch_andnot
 
-#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
-#define atomic_fetch_or_acquire		atomic_fetch_or_acquire
-#define atomic_fetch_or_release		atomic_fetch_or_release
-#define atomic_fetch_or			atomic_fetch_or
+#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_or_acquire	arch_atomic_fetch_or_acquire
+#define arch_atomic_fetch_or_release	arch_atomic_fetch_or_release
+#define arch_atomic_fetch_or		arch_atomic_fetch_or
 
-#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
-#define atomic_fetch_xor_acquire	atomic_fetch_xor_acquire
-#define atomic_fetch_xor_release	atomic_fetch_xor_release
-#define atomic_fetch_xor		atomic_fetch_xor
+#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_xor_acquire	arch_atomic_fetch_xor_acquire
+#define arch_atomic_fetch_xor_release	arch_atomic_fetch_xor_release
+#define arch_atomic_fetch_xor		arch_atomic_fetch_xor
 
-#define atomic_xchg_relaxed(v, new)	xchg_relaxed(&((v)->counter), (new))
-#define atomic_xchg_acquire(v, new)	xchg_acquire(&((v)->counter), (new))
-#define atomic_xchg_release(v, new)	xchg_release(&((v)->counter), (new))
-#define atomic_xchg(v, new)		xchg(&((v)->counter), (new))
+#define arch_atomic_xchg_relaxed(v, new) \
+	arch_xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic_xchg_acquire(v, new) \
+	arch_xchg_acquire(&((v)->counter), (new))
+#define arch_atomic_xchg_release(v, new) \
+	arch_xchg_release(&((v)->counter), (new))
+#define arch_atomic_xchg(v, new) \
+	arch_xchg(&((v)->counter), (new))
 
-#define atomic_cmpxchg_relaxed(v, old, new) \
-	cmpxchg_relaxed(&((v)->counter), (old), (new))
-#define atomic_cmpxchg_acquire(v, old, new) \
-	cmpxchg_acquire(&((v)->counter), (old), (new))
-#define atomic_cmpxchg_release(v, old, new) \
-	cmpxchg_release(&((v)->counter), (old), (new))
-#define atomic_cmpxchg(v, old, new)	cmpxchg(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg_relaxed(v, old, new) \
+	arch_cmpxchg_relaxed(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg_acquire(v, old, new) \
+	arch_cmpxchg_acquire(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg_release(v, old, new) \
+	arch_cmpxchg_release(&((v)->counter), (old), (new))
+#define arch_atomic_cmpxchg(v, old, new) \
+	arch_cmpxchg(&((v)->counter), (old), (new))
 
-#define atomic_andnot			atomic_andnot
+#define arch_atomic_andnot		arch_atomic_andnot
 
 /*
- * 64-bit atomic operations.
+ * 64-bit arch_atomic operations.
  */
-#define ATOMIC64_INIT			ATOMIC_INIT
-#define atomic64_read			atomic_read
-#define atomic64_set			atomic_set
+#define ATOMIC64_INIT			ATOMIC_INIT
+#define arch_atomic64_read		arch_atomic_read
+#define arch_atomic64_set		arch_atomic_set
 
-#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
-#define atomic64_add_return_acquire	atomic64_add_return_acquire
-#define atomic64_add_return_release	atomic64_add_return_release
-#define atomic64_add_return		atomic64_add_return
+#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
+#define arch_atomic64_add_return_acquire	arch_atomic64_add_return_acquire
+#define arch_atomic64_add_return_release	arch_atomic64_add_return_release
+#define arch_atomic64_add_return	arch_atomic64_add_return
 
-#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
-#define atomic64_sub_return_acquire	atomic64_sub_return_acquire
-#define atomic64_sub_return_release	atomic64_sub_return_release
-#define atomic64_sub_return		atomic64_sub_return
+#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
+#define arch_atomic64_sub_return_acquire	arch_atomic64_sub_return_acquire
+#define arch_atomic64_sub_return_release	arch_atomic64_sub_return_release
+#define arch_atomic64_sub_return	arch_atomic64_sub_return
 
-#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
-#define atomic64_fetch_add_acquire	atomic64_fetch_add_acquire
-#define atomic64_fetch_add_release	atomic64_fetch_add_release
-#define atomic64_fetch_add		atomic64_fetch_add
+#define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_add_acquire	arch_atomic64_fetch_add_acquire
+#define arch_atomic64_fetch_add_release	arch_atomic64_fetch_add_release
+#define arch_atomic64_fetch_add		arch_atomic64_fetch_add
 
-#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
-#define atomic64_fetch_sub_acquire	atomic64_fetch_sub_acquire
-#define atomic64_fetch_sub_release	atomic64_fetch_sub_release
-#define atomic64_fetch_sub		atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_sub_acquire	arch_atomic64_fetch_sub_acquire
+#define arch_atomic64_fetch_sub_release	arch_atomic64_fetch_sub_release
+#define arch_atomic64_fetch_sub		arch_atomic64_fetch_sub
 
-#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
-#define atomic64_fetch_and_acquire	atomic64_fetch_and_acquire
-#define atomic64_fetch_and_release	atomic64_fetch_and_release
-#define atomic64_fetch_and		atomic64_fetch_and
+#define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_and_acquire	arch_atomic64_fetch_and_acquire
+#define arch_atomic64_fetch_and_release	arch_atomic64_fetch_and_release
+#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
 
-#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_acquire	atomic64_fetch_andnot_acquire
-#define atomic64_fetch_andnot_release	atomic64_fetch_andnot_release
-#define atomic64_fetch_andnot		atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_andnot_acquire	arch_atomic64_fetch_andnot_acquire
+#define arch_atomic64_fetch_andnot_release	arch_atomic64_fetch_andnot_release
+#define arch_atomic64_fetch_andnot	arch_atomic64_fetch_andnot
 
-#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
-#define atomic64_fetch_or_acquire	atomic64_fetch_or_acquire
-#define atomic64_fetch_or_release	atomic64_fetch_or_release
-#define atomic64_fetch_or		atomic64_fetch_or
+#define arch_atomic64_fetch_or_relaxed	arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_or_acquire	arch_atomic64_fetch_or_acquire
+#define arch_atomic64_fetch_or_release	arch_atomic64_fetch_or_release
+#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
 
-#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
-#define atomic64_fetch_xor_acquire	atomic64_fetch_xor_acquire
-#define atomic64_fetch_xor_release	atomic64_fetch_xor_release
-#define atomic64_fetch_xor		atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_xor_acquire	arch_atomic64_fetch_xor_acquire
+#define arch_atomic64_fetch_xor_release	arch_atomic64_fetch_xor_release
+#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor
 
-#define atomic64_xchg_relaxed		atomic_xchg_relaxed
-#define atomic64_xchg_acquire		atomic_xchg_acquire
-#define atomic64_xchg_release		atomic_xchg_release
-#define atomic64_xchg			atomic_xchg
+#define arch_atomic64_xchg_relaxed	arch_atomic_xchg_relaxed
+#define arch_atomic64_xchg_acquire	arch_atomic_xchg_acquire
+#define arch_atomic64_xchg_release	arch_atomic_xchg_release
+#define arch_atomic64_xchg		arch_atomic_xchg
 
-#define atomic64_cmpxchg_relaxed	atomic_cmpxchg_relaxed
-#define atomic64_cmpxchg_acquire	atomic_cmpxchg_acquire
-#define atomic64_cmpxchg_release	atomic_cmpxchg_release
-#define atomic64_cmpxchg		atomic_cmpxchg
+#define arch_atomic64_cmpxchg_relaxed	arch_atomic_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_acquire	arch_atomic_cmpxchg_acquire
+#define arch_atomic64_cmpxchg_release	arch_atomic_cmpxchg_release
+#define arch_atomic64_cmpxchg		arch_atomic_cmpxchg
 
-#define atomic64_andnot			atomic64_andnot
+#define arch_atomic64_andnot		arch_atomic64_andnot
 
-#define atomic64_dec_if_positive	atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive	arch_atomic64_dec_if_positive
+
+#include <asm-generic/atomic-instrumented.h>
 
 #endif
 #endif
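The arch_ prefixing above is what lets the generated <asm-generic/atomic-instrumented.h>, now included at the end of the file, define the unprefixed API on top of the arm64 implementation. A minimal sketch of the shape of one generated wrapper (scripts/atomic/gen-atomic-instrumented.sh below is the authority for the exact output):

	static inline int
	atomic_add_return(int i, atomic_t *v)
	{
		kasan_check_write(v, sizeof(*v));	/* KASAN instrumentation */
		return arch_atomic_add_return(i, v);	/* real arm64 implementation */
	}
	#define atomic_add_return atomic_add_return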
arch/arm64/include/asm/atomic_ll_sc.h

@@ -39,7 +39,7 @@
 
 #define ATOMIC_OP(op, asm_op)						\
 __LL_SC_INLINE void							\
-__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))				\
+__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v))			\
 {									\
 	unsigned long tmp;						\
 	int result;							\
@@ -53,11 +53,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))		\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
 	: "Ir" (i));							\
 }									\
-__LL_SC_EXPORT(atomic_##op);
+__LL_SC_EXPORT(arch_atomic_##op);
 
 #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
 __LL_SC_INLINE int							\
-__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))		\
+__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v))	\
 {									\
 	unsigned long tmp;						\
 	int result;							\
@@ -75,11 +75,11 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))	\
 									\
 	return result;							\
 }									\
-__LL_SC_EXPORT(atomic_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic_##op##_return##name);
 
 #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)		\
 __LL_SC_INLINE int							\
-__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v))		\
+__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v))	\
 {									\
 	unsigned long tmp;						\
 	int val, result;						\
@@ -97,7 +97,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v))	\
 									\
 	return result;							\
 }									\
-__LL_SC_EXPORT(atomic_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic_fetch_##op##name);
 
 #define ATOMIC_OPS(...)							\
 	ATOMIC_OP(__VA_ARGS__)						\
@@ -133,7 +133,7 @@ ATOMIC_OPS(xor, eor)
 
 #define ATOMIC64_OP(op, asm_op)						\
 __LL_SC_INLINE void							\
-__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))			\
+__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v))		\
 {									\
 	long result;							\
 	unsigned long tmp;						\
@@ -147,11 +147,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))	\
 	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
 	: "Ir" (i));							\
 }									\
-__LL_SC_EXPORT(atomic64_##op);
+__LL_SC_EXPORT(arch_atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)		\
 __LL_SC_INLINE long							\
-__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))	\
+__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
 {									\
 	long result;							\
 	unsigned long tmp;						\
@@ -169,11 +169,11 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v)) \
 									\
 	return result;							\
 }									\
-__LL_SC_EXPORT(atomic64_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic64_##op##_return##name);
 
 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)		\
 __LL_SC_INLINE long							\
-__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v))	\
+__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v))	\
 {									\
 	long result, val;						\
 	unsigned long tmp;						\
@@ -191,7 +191,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v)) \
 									\
 	return result;							\
 }									\
-__LL_SC_EXPORT(atomic64_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
 
 #define ATOMIC64_OPS(...)						\
 	ATOMIC64_OP(__VA_ARGS__)					\
@@ -226,7 +226,7 @@ ATOMIC64_OPS(xor, eor)
 #undef ATOMIC64_OP
 
 __LL_SC_INLINE long
-__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
 {
 	long result;
 	unsigned long tmp;
@@ -246,7 +246,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
 
 	return result;
 }
-__LL_SC_EXPORT(atomic64_dec_if_positive);
+__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
 
 #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl)		\
 __LL_SC_INLINE u##sz							\
arch/arm64/include/asm/atomic_lse.h

@@ -25,9 +25,9 @@
 #error "please don't include this file directly"
 #endif
 
-#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)
+#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(arch_atomic_##op)
 #define ATOMIC_OP(op, asm_op)						\
-static inline void atomic_##op(int i, atomic_t *v)			\
+static inline void arch_atomic_##op(int i, atomic_t *v)		\
 {									\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
@@ -47,7 +47,7 @@ ATOMIC_OP(add, stadd)
 #undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
-static inline int atomic_fetch_##op##name(int i, atomic_t *v)		\
+static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v)	\
 {									\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
@@ -79,7 +79,7 @@ ATOMIC_FETCH_OPS(add, ldadd)
 #undef ATOMIC_FETCH_OPS
 
 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
-static inline int atomic_add_return##name(int i, atomic_t *v)		\
+static inline int arch_atomic_add_return##name(int i, atomic_t *v)	\
 {									\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
@@ -105,7 +105,7 @@ ATOMIC_OP_ADD_RETURN(        , al, "memory")
 
 #undef ATOMIC_OP_ADD_RETURN
 
-static inline void atomic_and(int i, atomic_t *v)
+static inline void arch_atomic_and(int i, atomic_t *v)
 {
 	register int w0 asm ("w0") = i;
 	register atomic_t *x1 asm ("x1") = v;
@@ -123,7 +123,7 @@ static inline void atomic_and(int i, atomic_t *v)
 }
 
 #define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
-static inline int atomic_fetch_and##name(int i, atomic_t *v)		\
+static inline int arch_atomic_fetch_and##name(int i, atomic_t *v)	\
 {									\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
@@ -149,7 +149,7 @@ ATOMIC_FETCH_OP_AND(        , al, "memory")
 
 #undef ATOMIC_FETCH_OP_AND
 
-static inline void atomic_sub(int i, atomic_t *v)
+static inline void arch_atomic_sub(int i, atomic_t *v)
 {
 	register int w0 asm ("w0") = i;
 	register atomic_t *x1 asm ("x1") = v;
@@ -167,7 +167,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 }
 
 #define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
-static inline int atomic_sub_return##name(int i, atomic_t *v)		\
+static inline int arch_atomic_sub_return##name(int i, atomic_t *v)	\
 {									\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
@@ -195,7 +195,7 @@ ATOMIC_OP_SUB_RETURN(        , al, "memory")
 #undef ATOMIC_OP_SUB_RETURN
 
 #define ATOMIC_FETCH_OP_SUB(name, mb, cl...)				\
-static inline int atomic_fetch_sub##name(int i, atomic_t *v)		\
+static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v)	\
 {									\
 	register int w0 asm ("w0") = i;					\
 	register atomic_t *x1 asm ("x1") = v;				\
@@ -222,9 +222,9 @@ ATOMIC_FETCH_OP_SUB(        , al, "memory")
 #undef ATOMIC_FETCH_OP_SUB
 #undef __LL_SC_ATOMIC
 
-#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)
+#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(arch_atomic64_##op)
 #define ATOMIC64_OP(op, asm_op)						\
-static inline void atomic64_##op(long i, atomic64_t *v)		\
+static inline void arch_atomic64_##op(long i, atomic64_t *v)		\
 {									\
 	register long x0 asm ("x0") = i;				\
 	register atomic64_t *x1 asm ("x1") = v;				\
@@ -244,7 +244,7 @@ ATOMIC64_OP(add, stadd)
 #undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
-static inline long atomic64_fetch_##op##name(long i, atomic64_t *v)	\
+static inline long arch_atomic64_fetch_##op##name(long i, atomic64_t *v)\
 {									\
 	register long x0 asm ("x0") = i;				\
 	register atomic64_t *x1 asm ("x1") = v;				\
@@ -276,7 +276,7 @@ ATOMIC64_FETCH_OPS(add, ldadd)
 #undef ATOMIC64_FETCH_OPS
 
 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)			\
-static inline long atomic64_add_return##name(long i, atomic64_t *v)	\
+static inline long arch_atomic64_add_return##name(long i, atomic64_t *v)\
 {									\
 	register long x0 asm ("x0") = i;				\
 	register atomic64_t *x1 asm ("x1") = v;				\
@@ -302,7 +302,7 @@ ATOMIC64_OP_ADD_RETURN(        , al, "memory")
 
 #undef ATOMIC64_OP_ADD_RETURN
 
-static inline void atomic64_and(long i, atomic64_t *v)
+static inline void arch_atomic64_and(long i, atomic64_t *v)
 {
 	register long x0 asm ("x0") = i;
 	register atomic64_t *x1 asm ("x1") = v;
@@ -320,7 +320,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
 }
 
 #define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
-static inline long atomic64_fetch_and##name(long i, atomic64_t *v)	\
+static inline long arch_atomic64_fetch_and##name(long i, atomic64_t *v)\
 {									\
 	register long x0 asm ("x0") = i;				\
 	register atomic64_t *x1 asm ("x1") = v;				\
@@ -346,7 +346,7 @@ ATOMIC64_FETCH_OP_AND(        , al, "memory")
 
 #undef ATOMIC64_FETCH_OP_AND
 
-static inline void atomic64_sub(long i, atomic64_t *v)
+static inline void arch_atomic64_sub(long i, atomic64_t *v)
 {
 	register long x0 asm ("x0") = i;
 	register atomic64_t *x1 asm ("x1") = v;
@@ -364,7 +364,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
 }
 
 #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)			\
-static inline long atomic64_sub_return##name(long i, atomic64_t *v)	\
+static inline long arch_atomic64_sub_return##name(long i, atomic64_t *v)\
 {									\
 	register long x0 asm ("x0") = i;				\
 	register atomic64_t *x1 asm ("x1") = v;				\
@@ -392,7 +392,7 @@ ATOMIC64_OP_SUB_RETURN(        , al, "memory")
 #undef ATOMIC64_OP_SUB_RETURN
 
 #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)				\
-static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)	\
+static inline long arch_atomic64_fetch_sub##name(long i, atomic64_t *v)\
 {									\
 	register long x0 asm ("x0") = i;				\
 	register atomic64_t *x1 asm ("x1") = v;				\
@@ -418,7 +418,7 @@ ATOMIC64_FETCH_OP_SUB(        , al, "memory")
 
 #undef ATOMIC64_FETCH_OP_SUB
 
-static inline long atomic64_dec_if_positive(atomic64_t *v)
+static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	register long x0 asm ("x0") = (long)v;
 
arch/arm64/include/asm/cmpxchg.h

@@ -110,10 +110,10 @@ __XCHG_GEN(_mb)
 })
 
 /* xchg */
-#define xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
-#define xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
-#define xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
-#define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
+#define arch_xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
+#define arch_xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
+#define arch_xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
+#define arch_xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
 
 #define __CMPXCHG_GEN(sfx)						\
 static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
@@ -154,18 +154,18 @@ __CMPXCHG_GEN(_mb)
 })
 
 /* cmpxchg */
-#define cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
-#define cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
-#define cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
-#define cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
-#define cmpxchg_local		cmpxchg_relaxed
+#define arch_cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
+#define arch_cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
+#define arch_cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
+#define arch_cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
+#define arch_cmpxchg_local		arch_cmpxchg_relaxed
 
 /* cmpxchg64 */
-#define cmpxchg64_relaxed	cmpxchg_relaxed
-#define cmpxchg64_acquire	cmpxchg_acquire
-#define cmpxchg64_release	cmpxchg_release
-#define cmpxchg64		cmpxchg
-#define cmpxchg64_local		cmpxchg_local
+#define arch_cmpxchg64_relaxed		arch_cmpxchg_relaxed
+#define arch_cmpxchg64_acquire		arch_cmpxchg_acquire
+#define arch_cmpxchg64_release		arch_cmpxchg_release
+#define arch_cmpxchg64			arch_cmpxchg
+#define arch_cmpxchg64_local		arch_cmpxchg_local
 
 /* cmpxchg_double */
 #define system_has_cmpxchg_double()     1
@@ -177,24 +177,24 @@ __CMPXCHG_GEN(_mb)
 	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
 })
 
-#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
-({\
-	int __ret;\
-	__cmpxchg_double_check(ptr1, ptr2); \
-	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
-				     (unsigned long)(n1), (unsigned long)(n2), \
-				     ptr1); \
-	__ret; \
+#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+	int __ret; \
+	__cmpxchg_double_check(ptr1, ptr2); \
+	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
+				     (unsigned long)(n1), (unsigned long)(n2), \
+				     ptr1); \
+	__ret; \
 })
 
-#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
-({\
-	int __ret;\
-	__cmpxchg_double_check(ptr1, ptr2); \
-	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
-				  (unsigned long)(n1), (unsigned long)(n2), \
-				  ptr1); \
-	__ret; \
+#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
+({ \
+	int __ret; \
+	__cmpxchg_double_check(ptr1, ptr2); \
+	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
+				  (unsigned long)(n1), (unsigned long)(n2), \
+				  ptr1); \
+	__ret; \
 })
 
 #define __CMPWAIT_CASE(w, sfx, sz)					\
arch/arm64/include/asm/sync_bitops.h

@@ -15,13 +15,13 @@
  * ops which are SMP safe even on a UP kernel.
  */
 
-#define sync_set_bit(nr, p)            set_bit(nr, p)
-#define sync_clear_bit(nr, p)          clear_bit(nr, p)
-#define sync_change_bit(nr, p)         change_bit(nr, p)
-#define sync_test_and_set_bit(nr, p)   test_and_set_bit(nr, p)
-#define sync_test_and_clear_bit(nr, p) test_and_clear_bit(nr, p)
-#define sync_test_and_change_bit(nr, p) test_and_change_bit(nr, p)
-#define sync_test_bit(nr, addr)        test_bit(nr, addr)
-#define sync_cmpxchg                   cmpxchg
+#define sync_set_bit(nr, p)			set_bit(nr, p)
+#define sync_clear_bit(nr, p)			clear_bit(nr, p)
+#define sync_change_bit(nr, p)			change_bit(nr, p)
+#define sync_test_and_set_bit(nr, p)		test_and_set_bit(nr, p)
+#define sync_test_and_clear_bit(nr, p)		test_and_clear_bit(nr, p)
+#define sync_test_and_change_bit(nr, p)		test_and_change_bit(nr, p)
+#define sync_test_bit(nr, addr)			test_bit(nr, addr)
+#define arch_sync_cmpxchg			arch_cmpxchg
 
 #endif
(File diff suppressed because it is too large.)

include/linux/atomic-fallback.h | 2294 (new file)
(File diff suppressed because it is too large.)
scripts/atomic/atomic-tbl.sh | 186 (new executable file)

@@ -0,0 +1,186 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# helpers for dealing with atomics.tbl

#meta_in(meta, match)
meta_in()
{
	case "$1" in
	[$2]) return 0;;
	esac

	return 1
}

#meta_has_ret(meta)
meta_has_ret()
{
	meta_in "$1" "bBiIfFlR"
}

#meta_has_acquire(meta)
meta_has_acquire()
{
	meta_in "$1" "BFIlR"
}

#meta_has_release(meta)
meta_has_release()
{
	meta_in "$1" "BFIRs"
}

#meta_has_relaxed(meta)
meta_has_relaxed()
{
	meta_in "$1" "BFIR"
}

#find_fallback_template(pfx, name, sfx, order)
find_fallback_template()
{
	local pfx="$1"; shift
	local name="$1"; shift
	local sfx="$1"; shift
	local order="$1"; shift

	local base=""
	local file=""

	# We may have fallbacks for a specific case (e.g. read_acquire()), or
	# an entire class, e.g. *inc*().
	#
	# Start at the most specific, and fall back to the most general. Once
	# we find a specific fallback, don't bother looking for more.
	for base in "${pfx}${name}${sfx}${order}" "${name}"; do
		file="${ATOMICDIR}/fallbacks/${base}"

		if [ -f "${file}" ]; then
			printf "${file}"
			break
		fi
	done
}

#gen_ret_type(meta, int)
gen_ret_type() {
	local meta="$1"; shift
	local int="$1"; shift

	case "${meta}" in
	[sv]) printf "void";;
	[bB]) printf "bool";;
	[aiIfFlR]) printf "${int}";;
	esac
}

#gen_ret_stmt(meta)
gen_ret_stmt()
{
	if meta_has_ret "${meta}"; then
		printf "return ";
	fi
}

# gen_param_name(arg)
gen_param_name()
{
	# strip off the leading 'c' for 'cv'
	local name="${1#c}"
	printf "${name#*:}"
}

# gen_param_type(arg, int, atomic)
gen_param_type()
{
	local type="${1%%:*}"; shift
	local int="$1"; shift
	local atomic="$1"; shift

	case "${type}" in
	i) type="${int} ";;
	p) type="${int} *";;
	v) type="${atomic}_t *";;
	cv) type="const ${atomic}_t *";;
	esac

	printf "${type}"
}

#gen_param(arg, int, atomic)
gen_param()
{
	local arg="$1"; shift
	local int="$1"; shift
	local atomic="$1"; shift
	local name="$(gen_param_name "${arg}")"
	local type="$(gen_param_type "${arg}" "${int}" "${atomic}")"

	printf "${type}${name}"
}

#gen_params(int, atomic, arg...)
gen_params()
{
	local int="$1"; shift
	local atomic="$1"; shift

	while [ "$#" -gt 0 ]; do
		gen_param "$1" "${int}" "${atomic}"
		[ "$#" -gt 1 ] && printf ", "
		shift;
	done
}

#gen_args(arg...)
gen_args()
{
	while [ "$#" -gt 0 ]; do
		printf "$(gen_param_name "$1")"
		[ "$#" -gt 1 ] && printf ", "
		shift;
	done
}

#gen_proto_order_variants(meta, pfx, name, sfx, ...)
gen_proto_order_variants()
{
	local meta="$1"; shift
	local pfx="$1"; shift
	local name="$1"; shift
	local sfx="$1"; shift

	gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"

	if meta_has_acquire "${meta}"; then
		gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
	fi
	if meta_has_release "${meta}"; then
		gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
	fi
	if meta_has_relaxed "${meta}"; then
		gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
	fi
}

#gen_proto_variants(meta, name, ...)
gen_proto_variants()
{
	local meta="$1"; shift
	local name="$1"; shift
	local pfx=""
	local sfx=""

	meta_in "${meta}" "fF" && pfx="fetch_"
	meta_in "${meta}" "R" && sfx="_return"

	gen_proto_order_variants "${meta}" "${pfx}" "${name}" "${sfx}" "$@"
}

#gen_proto(meta, ...)
gen_proto() {
	local meta="$1"; shift
	for m in $(echo "${meta}" | fold -w1); do
		gen_proto_variants "${m}" "$@"
	done
}
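To make the arg-spec helpers concrete, here is a hand-worked expansion for the atomics.tbl entry "cmpxchg I v i:old i:new" with int=int and atomic=atomic (illustrative only, not generator output):

	/*
	 * gen_param_type("v")     -> "atomic_t *"	gen_param_name("v")     -> "v"
	 * gen_param_type("i:old") -> "int "		gen_param_name("i:old") -> "old"
	 * gen_ret_type("I")       -> "int"
	 * so gen_params() and gen_ret_type() assemble the prototype:
	 */
	static inline int
	atomic_cmpxchg(atomic_t *v, int old, int new);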
scripts/atomic/atomics.tbl | 41 (new executable file)

@@ -0,0 +1,41 @@
# name	meta	args...
#
# Where meta contains a string of variants to generate.
# Upper-case implies _{acquire,release,relaxed} variants.
# Valid meta values are:
# * B/b	- bool: returns bool
# * v	- void: returns void
# * I/i	- int: returns base type
# * R	- return: returns base type (has _return variants)
# * F/f	- fetch: returns base type (has fetch_ variants)
# * l	- load: returns base type (has _acquire order variant)
# * s	- store: returns void (has _release order variant)
#
# Where args contains list of type[:name], where type is:
# * cv	- const pointer to atomic base type (atomic_t/atomic64_t/atomic_long_t)
# * v	- pointer to atomic base type (atomic_t/atomic64_t/atomic_long_t)
# * i	- base type (int/s64/long)
# * p	- pointer to base type (int/s64/long)
#
read			l	cv
set			s	v	i
add			vRF	i	v
sub			vRF	i	v
inc			vRF	v
dec			vRF	v
and			vF	i	v
andnot			vF	i	v
or			vF	i	v
xor			vF	i	v
xchg			I	v	i
cmpxchg			I	v	i:old	i:new
try_cmpxchg		B	v	p:old	i:new
sub_and_test		b	i	v
dec_and_test		b	v
inc_and_test		b	v
add_negative		b	i	v
add_unless		fb	v	i:a	i:u
inc_not_zero		b	v
inc_unless_negative	b	v
dec_unless_positive	b	v
dec_if_positive		i	v
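Reading one row end-to-end: "add vRF i v" carries meta v (void op), R (value-returning, with ordering variants) and F (fetch variants), so for the 32-bit base type the generators cover this family (a sketch of the generated names; the bodies come from the arch or from the fallback templates below):

	void atomic_add(int i, atomic_t *v);			/* meta 'v' */
	int  atomic_add_return(int i, atomic_t *v);		/* meta 'R', plus ... */
	int  atomic_add_return_acquire(int i, atomic_t *v);
	int  atomic_add_return_release(int i, atomic_t *v);
	int  atomic_add_return_relaxed(int i, atomic_t *v);
	int  atomic_fetch_add(int i, atomic_t *v);		/* meta 'F', plus ... */
	int  atomic_fetch_add_acquire(int i, atomic_t *v);
	int  atomic_fetch_add_release(int i, atomic_t *v);
	int  atomic_fetch_add_relaxed(int i, atomic_t *v);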
scripts/atomic/check-atomics.sh | 19 (new executable file)

@@ -0,0 +1,19 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
# Check if atomic headers are up-to-date

ATOMICDIR=$(dirname $0)
ATOMICTBL=${ATOMICDIR}/atomics.tbl
LINUXDIR=${ATOMICDIR}/../..

cat <<EOF |
gen-atomic-instrumented.sh      asm-generic/atomic-instrumented.h
gen-atomic-long.sh              asm-generic/atomic-long.h
gen-atomic-fallback.sh          linux/atomic-fallback.h
EOF
while read script header; do
	if ! (${ATOMICDIR}/${script} ${ATOMICTBL} | diff - ${LINUXDIR}/include/${header} > /dev/null); then
		printf "warning: include/${header} is out-of-date.\n"
	fi
done
scripts/atomic/fallbacks/acquire | 9 (new executable file)

@@ -0,0 +1,9 @@
cat <<EOF
static inline ${ret}
${atomic}_${pfx}${name}${sfx}_acquire(${params})
{
	${ret} ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
	__atomic_acquire_fence();
	return ret;
}
EOF
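Substituting atomic=atomic, pfx=fetch_, name=add, an empty sfx and ret=int, the template above expands to (a sketch of the generated output):

	static inline int
	atomic_fetch_add_acquire(int i, atomic_t *v)
	{
		int ret = atomic_fetch_add_relaxed(i, v);	/* unordered RMW */
		__atomic_acquire_fence();			/* upgrade to acquire ordering */
		return ret;
	}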
scripts/atomic/fallbacks/add_negative | 16 (new executable file)

@@ -0,0 +1,16 @@
cat <<EOF
/**
 * ${atomic}_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type ${atomic}_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool
${atomic}_add_negative(${int} i, ${atomic}_t *v)
{
	return ${atomic}_add_return(i, v) < 0;
}
EOF

scripts/atomic/fallbacks/add_unless | 16 (new executable file)

@@ -0,0 +1,16 @@
cat << EOF
/**
 * ${atomic}_add_unless - add unless the number is already a given value
 * @v: pointer of type ${atomic}_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, if @v was not already @u.
 * Returns true if the addition was done.
 */
static inline bool
${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
	return ${atomic}_fetch_add_unless(v, a, u) != u;
}
EOF

scripts/atomic/fallbacks/andnot | 7 (new executable file)

@@ -0,0 +1,7 @@
cat <<EOF
static inline ${ret}
${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
{
	${retstmt}${atomic}_${pfx}and${sfx}${order}(~i, v);
}
EOF

scripts/atomic/fallbacks/dec | 7 (new executable file)

@@ -0,0 +1,7 @@
cat <<EOF
static inline ${ret}
${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
{
	${retstmt}${atomic}_${pfx}sub${sfx}${order}(1, v);
}
EOF

scripts/atomic/fallbacks/dec_and_test | 15 (new executable file)

@@ -0,0 +1,15 @@
cat <<EOF
/**
 * ${atomic}_dec_and_test - decrement and test
 * @v: pointer of type ${atomic}_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool
${atomic}_dec_and_test(${atomic}_t *v)
{
	return ${atomic}_dec_return(v) == 0;
}
EOF

scripts/atomic/fallbacks/dec_if_positive | 15 (new executable file)

@@ -0,0 +1,15 @@
cat <<EOF
static inline ${ret}
${atomic}_dec_if_positive(${atomic}_t *v)
{
	${int} dec, c = ${atomic}_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!${atomic}_try_cmpxchg(v, &c, dec));

	return dec;
}
EOF
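The dec_if_positive template is the canonical try_cmpxchg() loop: one plain read up front, then retries only on contention, with the failed try_cmpxchg() refreshing the expected value. Expanded for atomic64 (int=s64), as a sketch:

	static inline s64
	atomic64_dec_if_positive(atomic64_t *v)
	{
		s64 dec, c = atomic64_read(v);		/* single plain read */

		do {
			dec = c - 1;
			if (unlikely(dec < 0))		/* would drop below zero: bail out */
				break;
		} while (!atomic64_try_cmpxchg(v, &c, dec));	/* c is refreshed on failure */

		return dec;
	}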
scripts/atomic/fallbacks/dec_unless_positive | 14 (new executable file)

@@ -0,0 +1,14 @@
cat <<EOF
static inline bool
${atomic}_dec_unless_positive(${atomic}_t *v)
{
	${int} c = ${atomic}_read(v);

	do {
		if (unlikely(c > 0))
			return false;
	} while (!${atomic}_try_cmpxchg(v, &c, c - 1));

	return true;
}
EOF

scripts/atomic/fallbacks/fence | 11 (new executable file)

@@ -0,0 +1,11 @@
cat <<EOF
static inline ${ret}
${atomic}_${pfx}${name}${sfx}(${params})
{
	${ret} ret;
	__atomic_pre_full_fence();
	ret = ${atomic}_${pfx}${name}${sfx}_relaxed(${args});
	__atomic_post_full_fence();
	return ret;
}
EOF
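The fence template builds the fully-ordered variant the same way the acquire template does, but brackets the relaxed op with the full-barrier hooks (which, unless the architecture overrides them, default to smp_mb__before_atomic()/smp_mb__after_atomic()). Expanded for atomic_add_return, as a sketch:

	static inline int
	atomic_add_return(int i, atomic_t *v)
	{
		int ret;
		__atomic_pre_full_fence();		/* full barrier before the RMW */
		ret = atomic_add_return_relaxed(i, v);
		__atomic_post_full_fence();		/* full barrier after the RMW */
		return ret;
	}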
scripts/atomic/fallbacks/fetch_add_unless | 23 (new executable file)

@@ -0,0 +1,23 @@
cat << EOF
/**
 * ${atomic}_fetch_add_unless - add unless the number is already a given value
 * @v: pointer of type ${atomic}_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns original value of @v
 */
static inline ${int}
${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
{
	${int} c = ${atomic}_read(v);

	do {
		if (unlikely(c == u))
			break;
	} while (!${atomic}_try_cmpxchg(v, &c, c + a));

	return c;
}
EOF

scripts/atomic/fallbacks/inc | 7 (new executable file)

@@ -0,0 +1,7 @@
cat <<EOF
static inline ${ret}
${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
{
	${retstmt}${atomic}_${pfx}add${sfx}${order}(1, v);
}
EOF

scripts/atomic/fallbacks/inc_and_test | 15 (new executable file)

@@ -0,0 +1,15 @@
cat <<EOF
/**
 * ${atomic}_inc_and_test - increment and test
 * @v: pointer of type ${atomic}_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool
${atomic}_inc_and_test(${atomic}_t *v)
{
	return ${atomic}_inc_return(v) == 0;
}
EOF

scripts/atomic/fallbacks/inc_not_zero | 14 (new executable file)

@@ -0,0 +1,14 @@
cat <<EOF
/**
 * ${atomic}_inc_not_zero - increment unless the number is zero
 * @v: pointer of type ${atomic}_t
 *
 * Atomically increments @v by 1, if @v is non-zero.
 * Returns true if the increment was done.
 */
static inline bool
${atomic}_inc_not_zero(${atomic}_t *v)
{
	return ${atomic}_add_unless(v, 1, 0);
}
EOF

scripts/atomic/fallbacks/inc_unless_negative | 14 (new executable file)

@@ -0,0 +1,14 @@
cat <<EOF
static inline bool
${atomic}_inc_unless_negative(${atomic}_t *v)
{
	${int} c = ${atomic}_read(v);

	do {
		if (unlikely(c < 0))
			return false;
	} while (!${atomic}_try_cmpxchg(v, &c, c + 1));

	return true;
}
EOF

scripts/atomic/fallbacks/read_acquire | 7 (new executable file)

@@ -0,0 +1,7 @@
cat <<EOF
static inline ${ret}
${atomic}_read_acquire(const ${atomic}_t *v)
{
	return smp_load_acquire(&(v)->counter);
}
EOF

scripts/atomic/fallbacks/release | 8 (new executable file)

@@ -0,0 +1,8 @@
cat <<EOF
static inline ${ret}
${atomic}_${pfx}${name}${sfx}_release(${params})
{
	__atomic_release_fence();
	${retstmt}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
}
EOF

scripts/atomic/fallbacks/set_release | 7 (new executable file)

@@ -0,0 +1,7 @@
cat <<EOF
static inline void
${atomic}_set_release(${atomic}_t *v, ${int} i)
{
	smp_store_release(&(v)->counter, i);
}
EOF

scripts/atomic/fallbacks/sub_and_test | 16 (new executable file)

@@ -0,0 +1,16 @@
cat <<EOF
/**
 * ${atomic}_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type ${atomic}_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool
${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
{
	return ${atomic}_sub_return(i, v) == 0;
}
EOF

scripts/atomic/fallbacks/try_cmpxchg | 11 (new executable file)

@@ -0,0 +1,11 @@
cat <<EOF
static inline bool
${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
{
	${int} r, o = *old;
	r = ${atomic}_cmpxchg${order}(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
EOF
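The write-back through *old on failure is what distinguishes try_cmpxchg() from plain cmpxchg(), and is what lets the loops in the templates above avoid re-reading the counter on every iteration. A typical caller looks like this (illustrative; atomic_inc_saturated() is a hypothetical helper, not a kernel API):

	static inline void
	atomic_inc_saturated(atomic_t *v)	/* hypothetical example */
	{
		int c = atomic_read(v);

		do {
			if (c == INT_MAX)			/* already saturated */
				return;
		} while (!atomic_try_cmpxchg(v, &c, c + 1));	/* on failure, c holds the current value */
	}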
scripts/atomic/gen-atomic-fallback.sh | 181 (new executable file)

@@ -0,0 +1,181 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0

ATOMICDIR=$(dirname $0)

. ${ATOMICDIR}/atomic-tbl.sh

#gen_template_fallback(template, meta, pfx, name, sfx, order, atomic, int, args...)
gen_template_fallback()
{
	local template="$1"; shift
	local meta="$1"; shift
	local pfx="$1"; shift
	local name="$1"; shift
	local sfx="$1"; shift
	local order="$1"; shift
	local atomic="$1"; shift
	local int="$1"; shift

	local atomicname="${atomic}_${pfx}${name}${sfx}${order}"

	local ret="$(gen_ret_type "${meta}" "${int}")"
	local retstmt="$(gen_ret_stmt "${meta}")"
	local params="$(gen_params "${int}" "${atomic}" "$@")"
	local args="$(gen_args "$@")"

	if [ ! -z "${template}" ]; then
		printf "#ifndef ${atomicname}\n"
		. ${template}
		printf "#define ${atomicname} ${atomicname}\n"
		printf "#endif\n\n"
	fi
}

#gen_proto_fallback(meta, pfx, name, sfx, order, atomic, int, args...)
gen_proto_fallback()
{
	local meta="$1"; shift
	local pfx="$1"; shift
	local name="$1"; shift
	local sfx="$1"; shift
	local order="$1"; shift

	local tmpl="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
	gen_template_fallback "${tmpl}" "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
}

#gen_basic_fallbacks(basename)
gen_basic_fallbacks()
{
	local basename="$1"; shift
cat << EOF
#define ${basename}_acquire ${basename}
#define ${basename}_release ${basename}
#define ${basename}_relaxed ${basename}
EOF
}

#gen_proto_order_variants(meta, pfx, name, sfx, atomic, int, args...)
gen_proto_order_variants()
{
	local meta="$1"; shift
	local pfx="$1"; shift
	local name="$1"; shift
	local sfx="$1"; shift
	local atomic="$1"

	local basename="${atomic}_${pfx}${name}${sfx}"

	local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"

	# If we don't have relaxed atomics, then we don't bother with ordering fallbacks
	# read_acquire and set_release need to be templated, though
	if ! meta_has_relaxed "${meta}"; then
		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"

		if meta_has_acquire "${meta}"; then
			gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
		fi

		if meta_has_release "${meta}"; then
			gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
		fi

		return
	fi

	printf "#ifndef ${basename}_relaxed\n"

	if [ ! -z "${template}" ]; then
		printf "#ifdef ${basename}\n"
	fi

	gen_basic_fallbacks "${basename}"

	if [ ! -z "${template}" ]; then
		printf "#endif /* ${atomic}_${pfx}${name}${sfx} */\n\n"
		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
	fi

	printf "#else /* ${basename}_relaxed */\n\n"

	gen_template_fallback "${ATOMICDIR}/fallbacks/acquire" "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
	gen_template_fallback "${ATOMICDIR}/fallbacks/release" "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
	gen_template_fallback "${ATOMICDIR}/fallbacks/fence" "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"

	printf "#endif /* ${basename}_relaxed */\n\n"
}

gen_xchg_fallbacks()
{
	local xchg="$1"; shift
cat <<EOF
#ifndef ${xchg}_relaxed
#define ${xchg}_relaxed		${xchg}
#define ${xchg}_acquire		${xchg}
#define ${xchg}_release		${xchg}
#else /* ${xchg}_relaxed */

#ifndef ${xchg}_acquire
#define ${xchg}_acquire(...) \\
	__atomic_op_acquire(${xchg}, __VA_ARGS__)
#endif

#ifndef ${xchg}_release
#define ${xchg}_release(...) \\
	__atomic_op_release(${xchg}, __VA_ARGS__)
#endif

#ifndef ${xchg}
#define ${xchg}(...) \\
	__atomic_op_fence(${xchg}, __VA_ARGS__)
#endif

#endif /* ${xchg}_relaxed */

EOF
}

cat << EOF
// SPDX-License-Identifier: GPL-2.0

// Generated by $0
// DO NOT MODIFY THIS FILE DIRECTLY

#ifndef _LINUX_ATOMIC_FALLBACK_H
#define _LINUX_ATOMIC_FALLBACK_H

EOF

for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
	gen_xchg_fallbacks "${xchg}"
done

grep '^[a-z]' "$1" | while read name meta args; do
	gen_proto "${meta}" "${name}" "atomic" "int" ${args}
done

cat <<EOF
#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))

#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

EOF

grep '^[a-z]' "$1" | while read name meta args; do
	gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
done

cat <<EOF
#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))

#endif /* _LINUX_ATOMIC_FALLBACK_H */
EOF
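Invoked as gen-atomic-fallback.sh atomics.tbl, this emits include/linux/atomic-fallback.h (the 2294-line generated header listed earlier). For each op it produces guarded blocks shaped like this trimmed sketch for fetch_add:

	#ifndef atomic_fetch_add_relaxed
	#define atomic_fetch_add_acquire atomic_fetch_add
	#define atomic_fetch_add_release atomic_fetch_add
	#define atomic_fetch_add_relaxed atomic_fetch_add
	#else /* atomic_fetch_add_relaxed */

	#ifndef atomic_fetch_add_acquire
	static inline int
	atomic_fetch_add_acquire(int i, atomic_t *v)
	{
		int ret = atomic_fetch_add_relaxed(i, v);
		__atomic_acquire_fence();
		return ret;
	}
	#define atomic_fetch_add_acquire atomic_fetch_add_acquire
	#endif

	#endif /* atomic_fetch_add_relaxed */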
scripts/atomic/gen-atomic-instrumented.sh | 182 (new executable file)

@@ -0,0 +1,182 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0

ATOMICDIR=$(dirname $0)

. ${ATOMICDIR}/atomic-tbl.sh

#gen_param_check(arg)
gen_param_check()
{
	local arg="$1"; shift
	local type="${arg%%:*}"
	local name="$(gen_param_name "${arg}")"
	local rw="write"

	case "${type#c}" in
	i) return;;
	esac

	# We don't write to constant parameters
	[ ${type#c} != ${type} ] && rw="read"

	printf "\tkasan_check_${rw}(${name}, sizeof(*${name}));\n"
}

#gen_param_check(arg...)
gen_params_checks()
{
	while [ "$#" -gt 0 ]; do
		gen_param_check "$1"
		shift;
	done
}

# gen_guard(meta, atomic, pfx, name, sfx, order)
gen_guard()
{
	local meta="$1"; shift
	local atomic="$1"; shift
	local pfx="$1"; shift
	local name="$1"; shift
	local sfx="$1"; shift
	local order="$1"; shift

	local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}"

	local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"

	# We definitely need a preprocessor symbol for this atomic if it is an
	# ordering variant, or if there's a generic fallback.
	if [ ! -z "${order}" ] || [ ! -z "${template}" ]; then
		printf "defined(${atomicname})"
		return
	fi

	# If this is a base variant, but a relaxed variant *may* exist, then we
	# only have a preprocessor symbol if the relaxed variant isn't defined
	if meta_has_relaxed "${meta}"; then
		printf "!defined(${atomicname}_relaxed) || defined(${atomicname})"
	fi
}

#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
gen_proto_order_variant()
{
	local meta="$1"; shift
	local pfx="$1"; shift
	local name="$1"; shift
	local sfx="$1"; shift
	local order="$1"; shift
	local atomic="$1"; shift
	local int="$1"; shift

	local atomicname="${atomic}_${pfx}${name}${sfx}${order}"

	local guard="$(gen_guard "${meta}" "${atomic}" "${pfx}" "${name}" "${sfx}" "${order}")"

	local ret="$(gen_ret_type "${meta}" "${int}")"
	local params="$(gen_params "${int}" "${atomic}" "$@")"
	local checks="$(gen_params_checks "$@")"
	local args="$(gen_args "$@")"
	local retstmt="$(gen_ret_stmt "${meta}")"

	[ ! -z "${guard}" ] && printf "#if ${guard}\n"

cat <<EOF
static inline ${ret}
${atomicname}(${params})
{
${checks}
	${retstmt}arch_${atomicname}(${args});
}
#define ${atomicname} ${atomicname}
EOF

	[ ! -z "${guard}" ] && printf "#endif\n"

	printf "\n"
}

gen_xchg()
{
	local xchg="$1"; shift
	local mult="$1"; shift

cat <<EOF
#define ${xchg}(ptr, ...)						\\
({									\\
	typeof(ptr) __ai_ptr = (ptr);					\\
	kasan_check_write(__ai_ptr, ${mult}sizeof(*__ai_ptr));		\\
	arch_${xchg}(__ai_ptr, __VA_ARGS__);				\\
})
EOF
}

gen_optional_xchg()
{
	local name="$1"; shift
	local sfx="$1"; shift
	local guard="defined(arch_${name}${sfx})"

	[ -z "${sfx}" ] && guard="!defined(arch_${name}_relaxed) || defined(arch_${name})"

	printf "#if ${guard}\n"
	gen_xchg "${name}${sfx}" ""
	printf "#endif\n\n"
}

cat << EOF
// SPDX-License-Identifier: GPL-2.0

// Generated by $0
// DO NOT MODIFY THIS FILE DIRECTLY

/*
 * This file provides wrappers with KASAN instrumentation for atomic operations.
 * To use this functionality an arch's atomic.h file needs to define all
 * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
 * this file at the end. This file provides atomic_read() that forwards to
 * arch_atomic_read() for actual atomic operation.
 * Note: if an arch atomic operation is implemented by means of other atomic
 * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
 * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
 * double instrumentation.
 */
#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H

#include <linux/build_bug.h>
#include <linux/kasan-checks.h>

EOF

grep '^[a-z]' "$1" | while read name meta args; do
	gen_proto "${meta}" "${name}" "atomic" "int" ${args}
done

grep '^[a-z]' "$1" | while read name meta args; do
	gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
done

for xchg in "xchg" "cmpxchg" "cmpxchg64"; do
	for order in "" "_acquire" "_release" "_relaxed"; do
		gen_optional_xchg "${xchg}" "${order}"
	done
done

for xchg in "cmpxchg_local" "cmpxchg64_local" "sync_cmpxchg"; do
	gen_xchg "${xchg}" ""
	printf "\n"
done

gen_xchg "cmpxchg_double" "2 * "

printf "\n\n"

gen_xchg "cmpxchg_double_local" "2 * "

cat <<EOF

#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
EOF
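Alongside the per-op functions, gen_xchg() wraps the pointer-based operations as macros. Its output for cmpxchg (with an empty mult), a sketch matching the template above:

	#define cmpxchg(ptr, ...)						\
	({									\
		typeof(ptr) __ai_ptr = (ptr);					\
		kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));			\
		arch_cmpxchg(__ai_ptr, __VA_ARGS__);				\
	})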
scripts/atomic/gen-atomic-long.sh | 101 (new executable file)

@@ -0,0 +1,101 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0

ATOMICDIR=$(dirname $0)

. ${ATOMICDIR}/atomic-tbl.sh

#gen_cast(arg, int, atomic)
gen_cast()
{
	local arg="$1"; shift
	local int="$1"; shift
	local atomic="$1"; shift

	[ "${arg%%:*}" = "p" ] || return

	printf "($(gen_param_type "${arg}" "${int}" "${atomic}"))"
}

#gen_args_cast(int, atomic, arg...)
gen_args_cast()
{
	local int="$1"; shift
	local atomic="$1"; shift

	while [ "$#" -gt 0 ]; do
		local cast="$(gen_cast "$1" "${int}" "${atomic}")"
		local arg="$(gen_param_name "$1")"
		printf "${cast}${arg}"
		[ "$#" -gt 1 ] && printf ", "
		shift;
	done
}

#gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
gen_proto_order_variant()
{
	local meta="$1"; shift
	local name="$1$2$3$4"; shift; shift; shift; shift
	local atomic="$1"; shift
	local int="$1"; shift

	local ret="$(gen_ret_type "${meta}" "long")"
	local params="$(gen_params "long" "atomic_long" "$@")"
	local argscast="$(gen_args_cast "${int}" "${atomic}" "$@")"
	local retstmt="$(gen_ret_stmt "${meta}")"

cat <<EOF
static inline ${ret}
atomic_long_${name}(${params})
{
	${retstmt}${atomic}_${name}(${argscast});
}

EOF
}

cat << EOF
// SPDX-License-Identifier: GPL-2.0

// Generated by $0
// DO NOT MODIFY THIS FILE DIRECTLY

#ifndef _ASM_GENERIC_ATOMIC_LONG_H
#define _ASM_GENERIC_ATOMIC_LONG_H

#include <asm/types.h>

#ifdef CONFIG_64BIT
typedef atomic64_t atomic_long_t;
#define ATOMIC_LONG_INIT(i)		ATOMIC64_INIT(i)
#define atomic_long_cond_read_acquire	atomic64_cond_read_acquire
#define atomic_long_cond_read_relaxed	atomic64_cond_read_relaxed
#else
typedef atomic_t atomic_long_t;
#define ATOMIC_LONG_INIT(i)		ATOMIC_INIT(i)
#define atomic_long_cond_read_acquire	atomic_cond_read_acquire
#define atomic_long_cond_read_relaxed	atomic_cond_read_relaxed
#endif

#ifdef CONFIG_64BIT

EOF

grep '^[a-z]' "$1" | while read name meta args; do
	gen_proto "${meta}" "${name}" "atomic64" "s64" ${args}
done

cat <<EOF
#else /* CONFIG_64BIT */

EOF

grep '^[a-z]' "$1" | while read name meta args; do
	gen_proto "${meta}" "${name}" "atomic" "int" ${args}
done

cat <<EOF
#endif /* CONFIG_64BIT */
#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
EOF
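With CONFIG_64BIT, atomic_long_t is atomic64_t and each generated wrapper is a thin forwarding shim; only p-typed arguments receive a cast (via gen_cast()). A sketch of the generated asm-generic/atomic-long.h entry for the "set s v i" row:

	static inline void
	atomic_long_set(atomic_long_t *v, long i)
	{
		atomic64_set(v, i);	/* v passes through; i narrows to s64 */
	}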