Merge branch 'non-rcu/next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git

This commit is contained in:
Stephen Rothwell 2024-12-20 13:35:47 +11:00
commit 26232487a7
6 changed files with 155 additions and 97 deletions

View File

@ -18,6 +18,11 @@
#
# By convention, a blank line in a cell means "same as the preceding line".
#
# Note that the syntactic representation does not always match the sets and
# relations in linux-kernel.cat, due to redefinitions in linux-kernel.bell and
# lock.cat. For example, the po link between LKR and LKW is upgraded to an rmw
# link, and W[ACQUIRE] are not included in the Acquire set.
#
# Disclaimer. The table includes representations of "add" and "and" operations;
# corresponding/identical representations of "sub", "inc", "dec" and "or", "xor",
# "andnot" operations are omitted.
@ -27,16 +32,16 @@
------------------------------------------------------------------------------
| Non-RMW ops | |
------------------------------------------------------------------------------
| READ_ONCE | R[once] |
| READ_ONCE | R[ONCE] |
| atomic_read | |
| WRITE_ONCE | W[once] |
| WRITE_ONCE | W[ONCE] |
| atomic_set | |
| smp_load_acquire | R[acquire] |
| smp_load_acquire | R[ACQUIRE] |
| atomic_read_acquire | |
| smp_store_release | W[release] |
| smp_store_release | W[RELEASE] |
| atomic_set_release | |
| smp_store_mb | W[once] ->po F[mb] |
| smp_mb | F[mb] |
| smp_store_mb | W[ONCE] ->po F[MB] |
| smp_mb | F[MB] |
| smp_rmb | F[rmb] |
| smp_wmb | F[wmb] |
| smp_mb__before_atomic | F[before-atomic] |
@ -49,8 +54,8 @@
| rcu_read_lock | F[rcu-lock] |
| rcu_read_unlock | F[rcu-unlock] |
| synchronize_rcu | F[sync-rcu] |
| rcu_dereference | R[once] |
| rcu_assign_pointer | W[release] |
| rcu_dereference | R[ONCE] |
| rcu_assign_pointer | W[RELEASE] |
| srcu_read_lock | R[srcu-lock] |
| srcu_down_read | |
| srcu_read_unlock | W[srcu-unlock] |
@ -60,32 +65,31 @@
------------------------------------------------------------------------------
| RMW ops w/o return value | |
------------------------------------------------------------------------------
| atomic_add | R*[noreturn] ->rmw W*[once] |
| atomic_add | R*[NORETURN] ->rmw W*[NORETURN] |
| atomic_and | |
| spin_lock | LKR ->po LKW |
------------------------------------------------------------------------------
| RMW ops w/ return value | |
------------------------------------------------------------------------------
| atomic_add_return | F[mb] ->po R*[once] |
| | ->rmw W*[once] ->po F[mb] |
| atomic_add_return | R*[MB] ->rmw W*[MB] |
| atomic_fetch_add | |
| atomic_fetch_and | |
| atomic_xchg | |
| xchg | |
| atomic_add_negative | |
| atomic_add_return_relaxed | R*[once] ->rmw W*[once] |
| atomic_add_return_relaxed | R*[ONCE] ->rmw W*[ONCE] |
| atomic_fetch_add_relaxed | |
| atomic_fetch_and_relaxed | |
| atomic_xchg_relaxed | |
| xchg_relaxed | |
| atomic_add_negative_relaxed | |
| atomic_add_return_acquire | R*[acquire] ->rmw W*[once] |
| atomic_add_return_acquire | R*[ACQUIRE] ->rmw W*[ACQUIRE] |
| atomic_fetch_add_acquire | |
| atomic_fetch_and_acquire | |
| atomic_xchg_acquire | |
| xchg_acquire | |
| atomic_add_negative_acquire | |
| atomic_add_return_release | R*[once] ->rmw W*[release] |
| atomic_add_return_release | R*[RELEASE] ->rmw W*[RELEASE] |
| atomic_fetch_add_release | |
| atomic_fetch_and_release | |
| atomic_xchg_release | |
@ -94,17 +98,16 @@
------------------------------------------------------------------------------
| Conditional RMW ops | |
------------------------------------------------------------------------------
| atomic_cmpxchg | On success: F[mb] ->po R*[once] |
| | ->rmw W*[once] ->po F[mb] |
| | On failure: R*[once] |
| atomic_cmpxchg | On success: R*[MB] ->rmw W*[MB] |
| | On failure: R*[MB] |
| cmpxchg | |
| atomic_add_unless | |
| atomic_cmpxchg_relaxed | On success: R*[once] ->rmw W*[once] |
| | On failure: R*[once] |
| atomic_cmpxchg_acquire | On success: R*[acquire] ->rmw W*[once] |
| | On failure: R*[once] |
| atomic_cmpxchg_release | On success: R*[once] ->rmw W*[release] |
| | On failure: R*[once] |
| atomic_cmpxchg_relaxed | On success: R*[ONCE] ->rmw W*[ONCE] |
| | On failure: R*[ONCE] |
| atomic_cmpxchg_acquire | On success: R*[ACQUIRE] ->rmw W*[ACQUIRE] |
| | On failure: R*[ACQUIRE] |
| atomic_cmpxchg_release | On success: R*[RELEASE] ->rmw W*[RELEASE] |
| | On failure: R*[RELEASE] |
| spin_trylock | On success: LKR ->po LKW |
| | On failure: LF |
------------------------------------------------------------------------------

View File

@ -20,7 +20,7 @@ that litmus test to be exercised within the Linux kernel.
REQUIREMENTS
============
Version 7.52 or higher of the "herd7" and "klitmus7" tools must be
Version 7.58 or higher of the "herd7" and "klitmus7" tools must be
downloaded separately:
https://github.com/herd/herdtools7

View File

@ -13,17 +13,18 @@
"Linux-kernel memory consistency model"
enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) ||
'release (*smp_store_release*) ||
'acquire (*smp_load_acquire*) ||
'noreturn (* R of non-return RMW *)
instructions R[{'once,'acquire,'noreturn}]
instructions W[{'once,'release}]
instructions RMW[{'once,'acquire,'release}]
enum Accesses = 'ONCE (*READ_ONCE,WRITE_ONCE*) ||
'RELEASE (*smp_store_release*) ||
'ACQUIRE (*smp_load_acquire*) ||
'NORETURN (* R of non-return RMW *) ||
'MB (*xchg(),cmpxchg(),...*)
instructions R[Accesses]
instructions W[Accesses]
instructions RMW[Accesses]
enum Barriers = 'wmb (*smp_wmb*) ||
'rmb (*smp_rmb*) ||
'mb (*smp_mb*) ||
'MB (*smp_mb*) ||
'barrier (*barrier*) ||
'rcu-lock (*rcu_read_lock*) ||
'rcu-unlock (*rcu_read_unlock*) ||
@ -35,6 +36,17 @@ enum Barriers = 'wmb (*smp_wmb*) ||
'after-srcu-read-unlock (*smp_mb__after_srcu_read_unlock*)
instructions F[Barriers]
(*
* Filter out syntactic annotations that do not provide the corresponding
* semantic ordering, such as Acquire on a store or Mb on a failed RMW.
*)
let FailedRMW = RMW \ (domain(rmw) | range(rmw))
let Acquire = ACQUIRE \ W \ FailedRMW
let Release = RELEASE \ R \ FailedRMW
let Mb = MB \ FailedRMW
let Noreturn = NORETURN \ W
(* SRCU *)
enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu
instructions SRCU[SRCU]
@ -73,7 +85,7 @@ flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
flag ~empty different-values(srcu-rscs) as srcu-bad-value-match
(* Compute marked and plain memory accesses *)
let Marked = (~M) | IW | Once | Release | Acquire | domain(rmw) | range(rmw) |
let Marked = (~M) | IW | ONCE | RELEASE | ACQUIRE | MB | RMW |
LKR | LKW | UL | LF | RL | RU | Srcu-lock | Srcu-unlock
let Plain = M \ Marked
@ -82,3 +94,6 @@ let carry-dep = (data ; [~ Srcu-unlock] ; rfi)*
let addr = carry-dep ; addr
let ctrl = carry-dep ; ctrl
let data = carry-dep ; data
flag ~empty (if "lkmmv2" then 0 else _)
as this-model-requires-variant-higher-than-lkmmv1

View File

@ -34,6 +34,16 @@ let R4rmb = R \ Noreturn (* Reads for which rmb works *)
let rmb = [R4rmb] ; fencerel(Rmb) ; [R4rmb]
let wmb = [W] ; fencerel(Wmb) ; [W]
let mb = ([M] ; fencerel(Mb) ; [M]) |
(*
 * full-barrier RMWs (successful cmpxchg(), xchg(), etc.) act as
 * though they were enclosed by smp_mb().
* The effect of these virtual smp_mb() is formalized by adding
* Mb tags to the read and write of the operation, and providing
* the same ordering as though there were additional po edges
* between the Mb tag and the read resp. write.
*)
([M] ; po ; [Mb & R]) |
([Mb & W] ; po ; [M]) |
([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) |
([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) |
([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |

View File

@ -1,6 +1,7 @@
macros linux-kernel.def
bell linux-kernel.bell
model linux-kernel.cat
variant lkmmv2
graph columns
squished true
showevents noregs

View File

@ -6,18 +6,18 @@
// which appeared in ASPLOS 2018.
// ONCE
READ_ONCE(X) __load{once}(X)
WRITE_ONCE(X,V) { __store{once}(X,V); }
READ_ONCE(X) __load{ONCE}(X)
WRITE_ONCE(X,V) { __store{ONCE}(X,V); }
// Release Acquire and friends
smp_store_release(X,V) { __store{release}(*X,V); }
smp_load_acquire(X) __load{acquire}(*X)
rcu_assign_pointer(X,V) { __store{release}(X,V); }
rcu_dereference(X) __load{once}(X)
smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; }
smp_store_release(X,V) { __store{RELEASE}(*X,V); }
smp_load_acquire(X) __load{ACQUIRE}(*X)
rcu_assign_pointer(X,V) { __store{RELEASE}(X,V); }
rcu_dereference(X) __load{ONCE}(X)
smp_store_mb(X,V) { __store{ONCE}(X,V); __fence{MB}; }
// Fences
smp_mb() { __fence{mb}; }
smp_mb() { __fence{MB}; }
smp_rmb() { __fence{rmb}; }
smp_wmb() { __fence{wmb}; }
smp_mb__before_atomic() { __fence{before-atomic}; }
@ -28,14 +28,14 @@ smp_mb__after_srcu_read_unlock() { __fence{after-srcu-read-unlock}; }
barrier() { __fence{barrier}; }
// Exchange
xchg(X,V) __xchg{mb}(X,V)
xchg_relaxed(X,V) __xchg{once}(X,V)
xchg_release(X,V) __xchg{release}(X,V)
xchg_acquire(X,V) __xchg{acquire}(X,V)
cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
xchg(X,V) __xchg{MB}(X,V)
xchg_relaxed(X,V) __xchg{ONCE}(X,V)
xchg_release(X,V) __xchg{RELEASE}(X,V)
xchg_acquire(X,V) __xchg{ACQUIRE}(X,V)
cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W)
cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W)
cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W)
cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W)
// Spinlocks
spin_lock(X) { __lock(X); }
@ -63,57 +63,86 @@ atomic_set(X,V) { WRITE_ONCE(*X,V); }
atomic_read_acquire(X) smp_load_acquire(X)
atomic_set_release(X,V) { smp_store_release(X,V); }
atomic_add(V,X) { __atomic_op(X,+,V); }
atomic_sub(V,X) { __atomic_op(X,-,V); }
atomic_inc(X) { __atomic_op(X,+,1); }
atomic_dec(X) { __atomic_op(X,-,1); }
atomic_add(V,X) { __atomic_op{NORETURN}(X,+,V); }
atomic_sub(V,X) { __atomic_op{NORETURN}(X,-,V); }
atomic_and(V,X) { __atomic_op{NORETURN}(X,&,V); }
atomic_or(V,X) { __atomic_op{NORETURN}(X,|,V); }
atomic_xor(V,X) { __atomic_op{NORETURN}(X,^,V); }
atomic_inc(X) { __atomic_op{NORETURN}(X,+,1); }
atomic_dec(X) { __atomic_op{NORETURN}(X,-,1); }
atomic_andnot(V,X) { __atomic_op{NORETURN}(X,&~,V); }
atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V)
atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V)
atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)
atomic_add_return(V,X) __atomic_op_return{MB}(X,+,V)
atomic_add_return_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V)
atomic_add_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V)
atomic_add_return_release(V,X) __atomic_op_return{RELEASE}(X,+,V)
atomic_fetch_add(V,X) __atomic_fetch_op{MB}(X,+,V)
atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{ONCE}(X,+,V)
atomic_fetch_add_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,+,V)
atomic_fetch_add_release(V,X) __atomic_fetch_op{RELEASE}(X,+,V)
atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)
atomic_fetch_and(V,X) __atomic_fetch_op{MB}(X,&,V)
atomic_fetch_and_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&,V)
atomic_fetch_and_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&,V)
atomic_fetch_and_release(V,X) __atomic_fetch_op{RELEASE}(X,&,V)
atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)
atomic_fetch_or(V,X) __atomic_fetch_op{MB}(X,|,V)
atomic_fetch_or_relaxed(V,X) __atomic_fetch_op{ONCE}(X,|,V)
atomic_fetch_or_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,|,V)
atomic_fetch_or_release(V,X) __atomic_fetch_op{RELEASE}(X,|,V)
atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)
atomic_fetch_xor(V,X) __atomic_fetch_op{MB}(X,^,V)
atomic_fetch_xor_relaxed(V,X) __atomic_fetch_op{ONCE}(X,^,V)
atomic_fetch_xor_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,^,V)
atomic_fetch_xor_release(V,X) __atomic_fetch_op{RELEASE}(X,^,V)
atomic_xchg(X,V) __xchg{mb}(X,V)
atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
atomic_xchg_release(X,V) __xchg{release}(X,V)
atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
atomic_inc_return(X) __atomic_op_return{MB}(X,+,1)
atomic_inc_return_relaxed(X) __atomic_op_return{ONCE}(X,+,1)
atomic_inc_return_acquire(X) __atomic_op_return{ACQUIRE}(X,+,1)
atomic_inc_return_release(X) __atomic_op_return{RELEASE}(X,+,1)
atomic_fetch_inc(X) __atomic_fetch_op{MB}(X,+,1)
atomic_fetch_inc_relaxed(X) __atomic_fetch_op{ONCE}(X,+,1)
atomic_fetch_inc_acquire(X) __atomic_fetch_op{ACQUIRE}(X,+,1)
atomic_fetch_inc_release(X) __atomic_fetch_op{RELEASE}(X,+,1)
atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
atomic_dec_and_test(X) __atomic_op_return{mb}(X,-,1) == 0
atomic_inc_and_test(X) __atomic_op_return{mb}(X,+,1) == 0
atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0
atomic_sub_return(V,X) __atomic_op_return{MB}(X,-,V)
atomic_sub_return_relaxed(V,X) __atomic_op_return{ONCE}(X,-,V)
atomic_sub_return_acquire(V,X) __atomic_op_return{ACQUIRE}(X,-,V)
atomic_sub_return_release(V,X) __atomic_op_return{RELEASE}(X,-,V)
atomic_fetch_sub(V,X) __atomic_fetch_op{MB}(X,-,V)
atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{ONCE}(X,-,V)
atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,-,V)
atomic_fetch_sub_release(V,X) __atomic_fetch_op{RELEASE}(X,-,V)
atomic_dec_return(X) __atomic_op_return{MB}(X,-,1)
atomic_dec_return_relaxed(X) __atomic_op_return{ONCE}(X,-,1)
atomic_dec_return_acquire(X) __atomic_op_return{ACQUIRE}(X,-,1)
atomic_dec_return_release(X) __atomic_op_return{RELEASE}(X,-,1)
atomic_fetch_dec(X) __atomic_fetch_op{MB}(X,-,1)
atomic_fetch_dec_relaxed(X) __atomic_fetch_op{ONCE}(X,-,1)
atomic_fetch_dec_acquire(X) __atomic_fetch_op{ACQUIRE}(X,-,1)
atomic_fetch_dec_release(X) __atomic_fetch_op{RELEASE}(X,-,1)
atomic_xchg(X,V) __xchg{MB}(X,V)
atomic_xchg_relaxed(X,V) __xchg{ONCE}(X,V)
atomic_xchg_release(X,V) __xchg{RELEASE}(X,V)
atomic_xchg_acquire(X,V) __xchg{ACQUIRE}(X,V)
atomic_cmpxchg(X,V,W) __cmpxchg{MB}(X,V,W)
atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{ONCE}(X,V,W)
atomic_cmpxchg_acquire(X,V,W) __cmpxchg{ACQUIRE}(X,V,W)
atomic_cmpxchg_release(X,V,W) __cmpxchg{RELEASE}(X,V,W)
atomic_sub_and_test(V,X) __atomic_op_return{MB}(X,-,V) == 0
atomic_dec_and_test(X) __atomic_op_return{MB}(X,-,1) == 0
atomic_inc_and_test(X) __atomic_op_return{MB}(X,+,1) == 0
atomic_add_negative(V,X) __atomic_op_return{MB}(X,+,V) < 0
atomic_add_negative_relaxed(V,X) __atomic_op_return{ONCE}(X,+,V) < 0
atomic_add_negative_acquire(V,X) __atomic_op_return{ACQUIRE}(X,+,V) < 0
atomic_add_negative_release(V,X) __atomic_op_return{RELEASE}(X,+,V) < 0
atomic_fetch_andnot(V,X) __atomic_fetch_op{MB}(X,&~,V)
atomic_fetch_andnot_acquire(V,X) __atomic_fetch_op{ACQUIRE}(X,&~,V)
atomic_fetch_andnot_release(V,X) __atomic_fetch_op{RELEASE}(X,&~,V)
atomic_fetch_andnot_relaxed(V,X) __atomic_fetch_op{ONCE}(X,&~,V)
atomic_add_unless(X,V,W) __atomic_add_unless{MB}(X,V,W)