KCSAN updates for v5.17

This series provides KCSAN fixes and also the ability to take memory
barriers into account for weakly-ordered systems. This last can increase
the probability of detecting certain types of data races.

-----BEGIN PGP SIGNATURE-----

iQJHBAABCgAxFiEEbK7UrM+RBIrCoViJnr8S83LZ+4wFAmHbuRwTHHBhdWxtY2tA
a2VybmVsLm9yZwAKCRCevxLzctn7jKDPEACWuzYnd/u/02AHyRd3PIF3Px9uFKlK
TFwaXX95oYSFCXcrmO42YtDUlZm4QcbwNb85KMCu1DvckRtIsNw0rkBU7BGyqv3Z
ZoJEfMNpmC0x9+IFBOeseBHySPVT0x7GmYus05MSh0OLfkbCfyImmxRzgoKJGL+A
ADF9EQb4z2feWjmVEoN8uRaarCAD4f77rSXiX6oTCNDuKrHarqMld/TmoXFrJbu2
QtfwHeyvraKBnZdUoYfVbGVenyKb1vMv4bUlvrOcuJEgIi/J/th4FupR3XCGYulI
aWJTl2TQTGnMoE8VnFHgI27I841w3k5PVL+Y1hr/S4uN1/rIoQQuBzCtlnFeCksa
BiBXsHIchN8N0Dwh8zD8NMd2uxV4t3fmpxXTDAwaOm7vs5hA8AJ0XNu6Sz94Lyjv
wk2CxX41WWUNJVo3gh6SrS4mL6lC8+VvHF1hbIap++jrevj58gj1jAR1fdx4ANlT
e2qA00EeoMngEogDNZH42/Fxs3H9zxrBta2ZbkPkwzIqTHH+4pIQDCy2xO3T3oxc
twdGPYpjYdkf79EGsG4I4R/VA/IfcS09VIWTce8xSDeSnqkgFhcG37r1orJe8hTB
tH+ODkNOsz5HaEoa8OoAL4ko2l0fL99p2AtMPpuQfHjRj7aorF+dJIrqCizASxwx
37PjQgOmHeDHgQ==
=Q5fg
-----END PGP SIGNATURE-----

Merge tag 'kcsan.2022.01.09a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull KCSAN updates from Paul McKenney:
 "This provides KCSAN fixes and also the ability to take memory barriers
  into account for weakly-ordered systems. This last can increase the
  probability of detecting certain types of data races"

* tag 'kcsan.2022.01.09a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu: (29 commits)
  kcsan: Only test clear_bit_unlock_is_negative_byte if arch defines it
  kcsan: Avoid nested contexts reading inconsistent reorder_access
  kcsan: Turn barrier instrumentation into macros
  kcsan: Make barrier tests compatible with lockdep
  kcsan: Support WEAK_MEMORY with Clang where no objtool support exists
  compiler_attributes.h: Add __disable_sanitizer_instrumentation
  objtool, kcsan: Remove memory barrier instrumentation from noinstr
  objtool, kcsan: Add memory barrier instrumentation to whitelist
  sched, kcsan: Enable memory barrier instrumentation
  mm, kcsan: Enable barrier instrumentation
  x86/qspinlock, kcsan: Instrument barrier of pv_queued_spin_unlock()
  x86/barriers, kcsan: Use generic instrumentation for non-smp barriers
  asm-generic/bitops, kcsan: Add instrumentation for barriers
  locking/atomics, kcsan: Add instrumentation for barriers
  locking/barriers, kcsan: Support generic instrumentation
  locking/barriers, kcsan: Add instrumentation for barriers
  kcsan: selftest: Add test case to check memory barrier instrumentation
  kcsan: Ignore GCC 11+ warnings about TSan runtime support
  kcsan: test: Add test cases for memory barrier instrumentation
  kcsan: test: Match reordered or normal accesses
  ...

This commit is contained in:
commit 1be5bdf8cd
@@ -204,17 +204,17 @@ Ultimately this allows to determine the possible executions of concurrent code,
 and if that code is free from data races.
 
 KCSAN is aware of *marked atomic operations* (``READ_ONCE``, ``WRITE_ONCE``,
-``atomic_*``, etc.), but is oblivious of any ordering guarantees and simply
-assumes that memory barriers are placed correctly. In other words, KCSAN
-assumes that as long as a plain access is not observed to race with another
-conflicting access, memory operations are correctly ordered.
+``atomic_*``, etc.), and a subset of ordering guarantees implied by memory
+barriers. With ``CONFIG_KCSAN_WEAK_MEMORY=y``, KCSAN models load or store
+buffering, and can detect missing ``smp_mb()``, ``smp_wmb()``, ``smp_rmb()``,
+``smp_store_release()``, and all ``atomic_*`` operations with equivalent
+implied barriers.
 
-This means that KCSAN will not report *potential* data races due to missing
-memory ordering. Developers should therefore carefully consider the required
-memory ordering requirements that remain unchecked. If, however, missing
-memory ordering (that is observable with a particular compiler and
-architecture) leads to an observable data race (e.g. entering a critical
-section erroneously), KCSAN would report the resulting data race.
+Note, KCSAN will not report all data races due to missing memory ordering,
+specifically where a memory barrier would be required to prohibit subsequent
+memory operation from reordering before the barrier. Developers should
+therefore carefully consider the required memory ordering requirements that
+remain unchecked.
 
 Race Detection Beyond Data Races
 --------------------------------
@@ -268,6 +268,56 @@ marked operations, if all accesses to a variable that is accessed concurrently
 are properly marked, KCSAN will never trigger a watchpoint and therefore never
 report the accesses.
 
+Modeling Weak Memory
+~~~~~~~~~~~~~~~~~~~~
+
+KCSAN's approach to detecting data races due to missing memory barriers is
+based on modeling access reordering (with ``CONFIG_KCSAN_WEAK_MEMORY=y``).
+Each plain memory access for which a watchpoint is set up, is also selected for
+simulated reordering within the scope of its function (at most 1 in-flight
+access).
+
+Once an access has been selected for reordering, it is checked along every
+other access until the end of the function scope. If an appropriate memory
+barrier is encountered, the access will no longer be considered for simulated
+reordering.
+
+When the result of a memory operation should be ordered by a barrier, KCSAN can
+then detect data races where the conflict only occurs as a result of a missing
+barrier. Consider the example::
+
+	int x, flag;
+	void T1(void)
+	{
+		x = 1;                  // data race!
+		WRITE_ONCE(flag, 1);    // correct: smp_store_release(&flag, 1)
+	}
+	void T2(void)
+	{
+		while (!READ_ONCE(flag));   // correct: smp_load_acquire(&flag)
+		... = x;                    // data race!
+	}
+
+When weak memory modeling is enabled, KCSAN can consider ``x`` in ``T1`` for
+simulated reordering. After the write of ``flag``, ``x`` is again checked for
+concurrent accesses: because ``T2`` is able to proceed after the write of
+``flag``, a data race is detected. With the correct barriers in place, ``x``
+would not be considered for reordering after the proper release of ``flag``,
+and no data race would be detected.
+
+Deliberate trade-offs in complexity but also practical limitations mean only a
+subset of data races due to missing memory barriers can be detected. With
+currently available compiler support, the implementation is limited to modeling
+the effects of "buffering" (delaying accesses), since the runtime cannot
+"prefetch" accesses. Also recall that watchpoints are only set up for plain
+accesses, and the only access type for which KCSAN simulates reordering. This
+means reordering of marked accesses is not modeled.
+
+A consequence of the above is that acquire operations do not require barrier
+instrumentation (no prefetching). Furthermore, marked accesses introducing
+address or control dependencies do not require special handling (the marked
+access cannot be reordered, later dependent accesses cannot be prefetched).
+
 Key Properties
 ~~~~~~~~~~~~~~
 
@@ -290,8 +340,8 @@ Key Properties
 4. **Detects Racy Writes from Devices:** Due to checking data values upon
    setting up watchpoints, racy writes from devices can also be detected.
 
-5. **Memory Ordering:** KCSAN is *not* explicitly aware of the LKMM's ordering
-   rules; this may result in missed data races (false negatives).
+5. **Memory Ordering:** KCSAN is aware of only a subset of LKMM ordering rules;
+   this may result in missed data races (false negatives).
 
 6. **Analysis Accuracy:** For observed executions, due to using a sampling
    strategy, the analysis is *unsound* (false negatives possible), but aims to
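For reference, a corrected version of the T1/T2 example above, sketched from the
"correct:" comments in that example rather than taken from the patch itself,
pairs the release store with an acquire load:

	int x, flag;

	void T1(void)
	{
		x = 1;                           /* ordered before the release below */
		smp_store_release(&flag, 1);     /* replaces WRITE_ONCE(flag, 1) */
	}

	void T2(void)
	{
		while (!smp_load_acquire(&flag)) /* replaces READ_ONCE(flag) */
			;
		... = x;                         /* now ordered after the acquire: no data race */
	}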
@@ -19,9 +19,9 @@
 #define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
 				       X86_FEATURE_XMM2) ::: "memory", "cc")
 #else
-#define mb()	asm volatile("mfence":::"memory")
-#define rmb()	asm volatile("lfence":::"memory")
-#define wmb()	asm volatile("sfence" ::: "memory")
+#define __mb()	asm volatile("mfence":::"memory")
+#define __rmb()	asm volatile("lfence":::"memory")
+#define __wmb()	asm volatile("sfence" ::: "memory")
 #endif
 
 /**
@@ -51,8 +51,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 /* Prevent speculative execution past this barrier. */
 #define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)
 
-#define dma_rmb()	barrier()
-#define dma_wmb()	barrier()
+#define __dma_rmb()	barrier()
+#define __dma_wmb()	barrier()
 
 #define __smp_mb()	asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")
 
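With the x86 barriers renamed to their __-prefixed forms, the generic barrier
header (whose diff appears further below) supplies the instrumented
definitions. The net result on x86 can be sketched as follows; this is an
illustration of the effective expansion, not literal preprocessor output:

	/* sketch: what the non-SMP barriers effectively become after this series */
	#define mb()	do { kcsan_mb();  __mb();  } while (0)	/* __mb()  is "mfence" */
	#define rmb()	do { kcsan_rmb(); __rmb(); } while (0)	/* __rmb() is "lfence" */
	#define wmb()	do { kcsan_wmb(); __wmb(); } while (0)	/* __wmb() is "sfence" */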
@@ -53,6 +53,7 @@ static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 
 static inline void queued_spin_unlock(struct qspinlock *lock)
 {
+	kcsan_release();
 	pv_queued_spin_unlock(lock);
 }
 
@@ -14,12 +14,38 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
+#include <linux/kcsan-checks.h>
 #include <asm/rwonce.h>
 
 #ifndef nop
 #define nop()	asm volatile ("nop")
 #endif
 
+/*
+ * Architectures that want generic instrumentation can define __ prefixed
+ * variants of all barriers.
+ */
+
+#ifdef __mb
+#define mb()	do { kcsan_mb(); __mb(); } while (0)
+#endif
+
+#ifdef __rmb
+#define rmb()	do { kcsan_rmb(); __rmb(); } while (0)
+#endif
+
+#ifdef __wmb
+#define wmb()	do { kcsan_wmb(); __wmb(); } while (0)
+#endif
+
+#ifdef __dma_rmb
+#define dma_rmb()	do { kcsan_rmb(); __dma_rmb(); } while (0)
+#endif
+
+#ifdef __dma_wmb
+#define dma_wmb()	do { kcsan_wmb(); __dma_wmb(); } while (0)
+#endif
+
 /*
  * Force strict CPU ordering. And yes, this is required on UP too when we're
  * talking to devices.
@@ -62,15 +88,15 @@
 #ifdef CONFIG_SMP
 
 #ifndef smp_mb
-#define smp_mb()	__smp_mb()
+#define smp_mb()	do { kcsan_mb(); __smp_mb(); } while (0)
 #endif
 
 #ifndef smp_rmb
-#define smp_rmb()	__smp_rmb()
+#define smp_rmb()	do { kcsan_rmb(); __smp_rmb(); } while (0)
 #endif
 
 #ifndef smp_wmb
-#define smp_wmb()	__smp_wmb()
+#define smp_wmb()	do { kcsan_wmb(); __smp_wmb(); } while (0)
 #endif
 
 #else	/* !CONFIG_SMP */
@@ -123,19 +149,19 @@ do { \
 #ifdef CONFIG_SMP
 
 #ifndef smp_store_mb
-#define smp_store_mb(var, value)	__smp_store_mb(var, value)
+#define smp_store_mb(var, value)	do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
-#define smp_mb__before_atomic()	__smp_mb__before_atomic()
+#define smp_mb__before_atomic()	do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
 #endif
 
 #ifndef smp_mb__after_atomic
-#define smp_mb__after_atomic()	__smp_mb__after_atomic()
+#define smp_mb__after_atomic()	do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
 #endif
 
 #ifndef smp_store_release
-#define smp_store_release(p, v)	__smp_store_release(p, v)
+#define smp_store_release(p, v)	do { kcsan_release(); __smp_store_release(p, v); } while (0)
 #endif
 
 #ifndef smp_load_acquire
@@ -178,13 +204,13 @@ do { \
 #endif	/* CONFIG_SMP */
 
 /* Barriers for virtual machine guests when talking to an SMP host */
-#define virt_mb() __smp_mb()
-#define virt_rmb() __smp_rmb()
-#define virt_wmb() __smp_wmb()
-#define virt_store_mb(var, value) __smp_store_mb(var, value)
-#define virt_mb__before_atomic() __smp_mb__before_atomic()
-#define virt_mb__after_atomic() __smp_mb__after_atomic()
-#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
+#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
+#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
+#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
+#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
+#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
+#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
 #define virt_load_acquire(p) __smp_load_acquire(p)
 
 /**
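Per the comment in the generic header above, an architecture opts into this
instrumentation simply by providing __-prefixed barrier definitions before
including asm-generic/barrier.h. A minimal hypothetical fragment is sketched
below; the RISC-V-style fence instructions are chosen purely for illustration
and are not taken from any architecture touched by this series:

	/* hypothetical arch barrier header fragment (sketch) */
	#define __mb()	asm volatile("fence rw,rw" ::: "memory")	/* illustrative insn */
	#define __rmb()	asm volatile("fence r,r"   ::: "memory")	/* illustrative insn */
	#define __wmb()	asm volatile("fence w,w"   ::: "memory")	/* illustrative insn */

	#include <asm-generic/barrier.h>	/* mb()/rmb()/wmb() now call kcsan_*() first */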
@@ -67,6 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
+	kcsan_mb();
 	instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_set_bit(nr, addr);
 }
@@ -80,6 +81,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
+	kcsan_mb();
 	instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_clear_bit(nr, addr);
 }
@@ -93,6 +95,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
+	kcsan_mb();
 	instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_test_and_change_bit(nr, addr);
 }
@@ -22,6 +22,7 @@
  */
 static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
+	kcsan_release();
 	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	arch_clear_bit_unlock(nr, addr);
 }
@@ -37,6 +38,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
  */
 static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
+	kcsan_release();
 	instrument_write(addr + BIT_WORD(nr), sizeof(long));
 	arch___clear_bit_unlock(nr, addr);
 }
@@ -71,6 +73,7 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 static inline bool
 clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
 {
+	kcsan_release();
 	instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
 	return arch_clear_bit_unlock_is_negative_byte(nr, addr);
 }
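To illustrate why the lock-style bitops above gain release instrumentation (and
the test_and_* ops full-barrier instrumentation), here is a usage sketch, not
taken from the patch, of a bit used as a simple lock; the ordering that KCSAN
can now model is exactly the acquire at lock time and the release at unlock:

	static unsigned long guard;	/* bit 0 acts as the lock (sketch) */
	static int shared_counter;

	static void update_shared(void)
	{
		while (test_and_set_bit_lock(0, &guard))	/* acquire */
			cpu_relax();
		shared_counter++;				/* plain access, protected */
		clear_bit_unlock(0, &guard);			/* release, instrumented above */
	}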
@@ -45,6 +45,7 @@ atomic_set(atomic_t *v, int i)
 static __always_inline void
 atomic_set_release(atomic_t *v, int i)
 {
+	kcsan_release();
 	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_set_release(v, i);
 }
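The remaining hunks in this generated atomics header all follow the same shape
as the one above: fully ordered operations gain a leading kcsan_mb(),
*_release operations gain kcsan_release(), and *_acquire/*_relaxed variants
are left without barrier instrumentation (acquire needs none, as the
documentation change explains). Two wrappers taken from the hunks below
illustrate the pattern:

	static __always_inline int
	atomic_add_return(int i, atomic_t *v)
	{
		kcsan_mb();				/* fully ordered RMW */
		instrument_atomic_read_write(v, sizeof(*v));
		return arch_atomic_add_return(i, v);
	}

	static __always_inline int
	atomic_add_return_release(int i, atomic_t *v)
	{
		kcsan_release();			/* release-ordered RMW */
		instrument_atomic_read_write(v, sizeof(*v));
		return arch_atomic_add_return_release(i, v);
	}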
@ -59,6 +60,7 @@ atomic_add(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_add_return(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_add_return(i, v);
|
||||
}
|
||||
@ -73,6 +75,7 @@ atomic_add_return_acquire(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_add_return_release(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_add_return_release(i, v);
|
||||
}
|
||||
@ -87,6 +90,7 @@ atomic_add_return_relaxed(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_add(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_add(i, v);
|
||||
}
|
||||
@ -101,6 +105,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_add_release(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_add_release(i, v);
|
||||
}
|
||||
@ -122,6 +127,7 @@ atomic_sub(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_sub_return(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_sub_return(i, v);
|
||||
}
|
||||
@ -136,6 +142,7 @@ atomic_sub_return_acquire(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_sub_return_release(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_sub_return_release(i, v);
|
||||
}
|
||||
@ -150,6 +157,7 @@ atomic_sub_return_relaxed(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_sub(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_sub(i, v);
|
||||
}
|
||||
@ -164,6 +172,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_sub_release(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_sub_release(i, v);
|
||||
}
|
||||
@ -185,6 +194,7 @@ atomic_inc(atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_inc_return(atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_inc_return(v);
|
||||
}
|
||||
@ -199,6 +209,7 @@ atomic_inc_return_acquire(atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_inc_return_release(atomic_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_inc_return_release(v);
|
||||
}
|
||||
@ -213,6 +224,7 @@ atomic_inc_return_relaxed(atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_inc(atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_inc(v);
|
||||
}
|
||||
@ -227,6 +239,7 @@ atomic_fetch_inc_acquire(atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_inc_release(atomic_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_inc_release(v);
|
||||
}
|
||||
@ -248,6 +261,7 @@ atomic_dec(atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_dec_return(atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_dec_return(v);
|
||||
}
|
||||
@ -262,6 +276,7 @@ atomic_dec_return_acquire(atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_dec_return_release(atomic_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_dec_return_release(v);
|
||||
}
|
||||
@ -276,6 +291,7 @@ atomic_dec_return_relaxed(atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_dec(atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_dec(v);
|
||||
}
|
||||
@ -290,6 +306,7 @@ atomic_fetch_dec_acquire(atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_dec_release(atomic_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_dec_release(v);
|
||||
}
|
||||
@ -311,6 +328,7 @@ atomic_and(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_and(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_and(i, v);
|
||||
}
|
||||
@ -325,6 +343,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_and_release(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_and_release(i, v);
|
||||
}
|
||||
@ -346,6 +365,7 @@ atomic_andnot(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_andnot(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_andnot(i, v);
|
||||
}
|
||||
@ -360,6 +380,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_andnot_release(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_andnot_release(i, v);
|
||||
}
|
||||
@ -381,6 +402,7 @@ atomic_or(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_or(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_or(i, v);
|
||||
}
|
||||
@ -395,6 +417,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_or_release(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_or_release(i, v);
|
||||
}
|
||||
@ -416,6 +439,7 @@ atomic_xor(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_xor(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_xor(i, v);
|
||||
}
|
||||
@ -430,6 +454,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_xor_release(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_xor_release(i, v);
|
||||
}
|
||||
@ -444,6 +469,7 @@ atomic_fetch_xor_relaxed(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_xchg(atomic_t *v, int i)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_xchg(v, i);
|
||||
}
|
||||
@ -458,6 +484,7 @@ atomic_xchg_acquire(atomic_t *v, int i)
|
||||
static __always_inline int
|
||||
atomic_xchg_release(atomic_t *v, int i)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_xchg_release(v, i);
|
||||
}
|
||||
@ -472,6 +499,7 @@ atomic_xchg_relaxed(atomic_t *v, int i)
|
||||
static __always_inline int
|
||||
atomic_cmpxchg(atomic_t *v, int old, int new)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_cmpxchg(v, old, new);
|
||||
}
|
||||
@ -486,6 +514,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
|
||||
static __always_inline int
|
||||
atomic_cmpxchg_release(atomic_t *v, int old, int new)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_cmpxchg_release(v, old, new);
|
||||
}
|
||||
@ -500,6 +529,7 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
|
||||
static __always_inline bool
|
||||
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
instrument_atomic_read_write(old, sizeof(*old));
|
||||
return arch_atomic_try_cmpxchg(v, old, new);
|
||||
@ -516,6 +546,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
|
||||
static __always_inline bool
|
||||
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
instrument_atomic_read_write(old, sizeof(*old));
|
||||
return arch_atomic_try_cmpxchg_release(v, old, new);
|
||||
@ -532,6 +563,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
|
||||
static __always_inline bool
|
||||
atomic_sub_and_test(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_sub_and_test(i, v);
|
||||
}
|
||||
@ -539,6 +571,7 @@ atomic_sub_and_test(int i, atomic_t *v)
|
||||
static __always_inline bool
|
||||
atomic_dec_and_test(atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_dec_and_test(v);
|
||||
}
|
||||
@ -546,6 +579,7 @@ atomic_dec_and_test(atomic_t *v)
|
||||
static __always_inline bool
|
||||
atomic_inc_and_test(atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_inc_and_test(v);
|
||||
}
|
||||
@ -553,6 +587,7 @@ atomic_inc_and_test(atomic_t *v)
|
||||
static __always_inline bool
|
||||
atomic_add_negative(int i, atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_add_negative(i, v);
|
||||
}
|
||||
@ -560,6 +595,7 @@ atomic_add_negative(int i, atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_fetch_add_unless(atomic_t *v, int a, int u)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_fetch_add_unless(v, a, u);
|
||||
}
|
||||
@ -567,6 +603,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u)
|
||||
static __always_inline bool
|
||||
atomic_add_unless(atomic_t *v, int a, int u)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_add_unless(v, a, u);
|
||||
}
|
||||
@ -574,6 +611,7 @@ atomic_add_unless(atomic_t *v, int a, int u)
|
||||
static __always_inline bool
|
||||
atomic_inc_not_zero(atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_inc_not_zero(v);
|
||||
}
|
||||
@ -581,6 +619,7 @@ atomic_inc_not_zero(atomic_t *v)
|
||||
static __always_inline bool
|
||||
atomic_inc_unless_negative(atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_inc_unless_negative(v);
|
||||
}
|
||||
@ -588,6 +627,7 @@ atomic_inc_unless_negative(atomic_t *v)
|
||||
static __always_inline bool
|
||||
atomic_dec_unless_positive(atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_dec_unless_positive(v);
|
||||
}
|
||||
@ -595,6 +635,7 @@ atomic_dec_unless_positive(atomic_t *v)
|
||||
static __always_inline int
|
||||
atomic_dec_if_positive(atomic_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_dec_if_positive(v);
|
||||
}
|
||||
@ -623,6 +664,7 @@ atomic64_set(atomic64_t *v, s64 i)
|
||||
static __always_inline void
|
||||
atomic64_set_release(atomic64_t *v, s64 i)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_write(v, sizeof(*v));
|
||||
arch_atomic64_set_release(v, i);
|
||||
}
|
||||
@ -637,6 +679,7 @@ atomic64_add(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_add_return(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_add_return(i, v);
|
||||
}
|
||||
@ -651,6 +694,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_add_return_release(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_add_return_release(i, v);
|
||||
}
|
||||
@ -665,6 +709,7 @@ atomic64_add_return_relaxed(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_add(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_add(i, v);
|
||||
}
|
||||
@ -679,6 +724,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_add_release(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_add_release(i, v);
|
||||
}
|
||||
@ -700,6 +746,7 @@ atomic64_sub(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_sub_return(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_sub_return(i, v);
|
||||
}
|
||||
@ -714,6 +761,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_sub_return_release(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_sub_return_release(i, v);
|
||||
}
|
||||
@ -728,6 +776,7 @@ atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_sub(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_sub(i, v);
|
||||
}
|
||||
@ -742,6 +791,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_sub_release(i, v);
|
||||
}
|
||||
@ -763,6 +813,7 @@ atomic64_inc(atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_inc_return(atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_inc_return(v);
|
||||
}
|
||||
@ -777,6 +828,7 @@ atomic64_inc_return_acquire(atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_inc_return_release(atomic64_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_inc_return_release(v);
|
||||
}
|
||||
@ -791,6 +843,7 @@ atomic64_inc_return_relaxed(atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_inc(atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_inc(v);
|
||||
}
|
||||
@ -805,6 +858,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_inc_release(atomic64_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_inc_release(v);
|
||||
}
|
||||
@ -826,6 +880,7 @@ atomic64_dec(atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_dec_return(atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_dec_return(v);
|
||||
}
|
||||
@ -840,6 +895,7 @@ atomic64_dec_return_acquire(atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_dec_return_release(atomic64_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_dec_return_release(v);
|
||||
}
|
||||
@ -854,6 +910,7 @@ atomic64_dec_return_relaxed(atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_dec(atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_dec(v);
|
||||
}
|
||||
@ -868,6 +925,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_dec_release(atomic64_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_dec_release(v);
|
||||
}
|
||||
@ -889,6 +947,7 @@ atomic64_and(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_and(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_and(i, v);
|
||||
}
|
||||
@ -903,6 +962,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_and_release(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_and_release(i, v);
|
||||
}
|
||||
@ -924,6 +984,7 @@ atomic64_andnot(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_andnot(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_andnot(i, v);
|
||||
}
|
||||
@ -938,6 +999,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_andnot_release(i, v);
|
||||
}
|
||||
@ -959,6 +1021,7 @@ atomic64_or(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_or(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_or(i, v);
|
||||
}
|
||||
@ -973,6 +1036,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_or_release(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_or_release(i, v);
|
||||
}
|
||||
@ -994,6 +1058,7 @@ atomic64_xor(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_xor(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_xor(i, v);
|
||||
}
|
||||
@ -1008,6 +1073,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_xor_release(i, v);
|
||||
}
|
||||
@ -1022,6 +1088,7 @@ atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_xchg(atomic64_t *v, s64 i)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_xchg(v, i);
|
||||
}
|
||||
@ -1036,6 +1103,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i)
|
||||
static __always_inline s64
|
||||
atomic64_xchg_release(atomic64_t *v, s64 i)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_xchg_release(v, i);
|
||||
}
|
||||
@ -1050,6 +1118,7 @@ atomic64_xchg_relaxed(atomic64_t *v, s64 i)
|
||||
static __always_inline s64
|
||||
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_cmpxchg(v, old, new);
|
||||
}
|
||||
@ -1064,6 +1133,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
|
||||
static __always_inline s64
|
||||
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_cmpxchg_release(v, old, new);
|
||||
}
|
||||
@ -1078,6 +1148,7 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
|
||||
static __always_inline bool
|
||||
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
instrument_atomic_read_write(old, sizeof(*old));
|
||||
return arch_atomic64_try_cmpxchg(v, old, new);
|
||||
@ -1094,6 +1165,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
|
||||
static __always_inline bool
|
||||
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
instrument_atomic_read_write(old, sizeof(*old));
|
||||
return arch_atomic64_try_cmpxchg_release(v, old, new);
|
||||
@ -1110,6 +1182,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
|
||||
static __always_inline bool
|
||||
atomic64_sub_and_test(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_sub_and_test(i, v);
|
||||
}
|
||||
@ -1117,6 +1190,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v)
|
||||
static __always_inline bool
|
||||
atomic64_dec_and_test(atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_dec_and_test(v);
|
||||
}
|
||||
@ -1124,6 +1198,7 @@ atomic64_dec_and_test(atomic64_t *v)
|
||||
static __always_inline bool
|
||||
atomic64_inc_and_test(atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_inc_and_test(v);
|
||||
}
|
||||
@ -1131,6 +1206,7 @@ atomic64_inc_and_test(atomic64_t *v)
|
||||
static __always_inline bool
|
||||
atomic64_add_negative(s64 i, atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_add_negative(i, v);
|
||||
}
|
||||
@ -1138,6 +1214,7 @@ atomic64_add_negative(s64 i, atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_fetch_add_unless(v, a, u);
|
||||
}
|
||||
@ -1145,6 +1222,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
|
||||
static __always_inline bool
|
||||
atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_add_unless(v, a, u);
|
||||
}
|
||||
@ -1152,6 +1230,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
|
||||
static __always_inline bool
|
||||
atomic64_inc_not_zero(atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_inc_not_zero(v);
|
||||
}
|
||||
@ -1159,6 +1238,7 @@ atomic64_inc_not_zero(atomic64_t *v)
|
||||
static __always_inline bool
|
||||
atomic64_inc_unless_negative(atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_inc_unless_negative(v);
|
||||
}
|
||||
@ -1166,6 +1246,7 @@ atomic64_inc_unless_negative(atomic64_t *v)
|
||||
static __always_inline bool
|
||||
atomic64_dec_unless_positive(atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_dec_unless_positive(v);
|
||||
}
|
||||
@ -1173,6 +1254,7 @@ atomic64_dec_unless_positive(atomic64_t *v)
|
||||
static __always_inline s64
|
||||
atomic64_dec_if_positive(atomic64_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic64_dec_if_positive(v);
|
||||
}
|
||||
@ -1201,6 +1283,7 @@ atomic_long_set(atomic_long_t *v, long i)
|
||||
static __always_inline void
|
||||
atomic_long_set_release(atomic_long_t *v, long i)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_write(v, sizeof(*v));
|
||||
arch_atomic_long_set_release(v, i);
|
||||
}
|
||||
@ -1215,6 +1298,7 @@ atomic_long_add(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_add_return(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_add_return(i, v);
|
||||
}
|
||||
@ -1229,6 +1313,7 @@ atomic_long_add_return_acquire(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_add_return_release(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_add_return_release(i, v);
|
||||
}
|
||||
@ -1243,6 +1328,7 @@ atomic_long_add_return_relaxed(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_add(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_add(i, v);
|
||||
}
|
||||
@ -1257,6 +1343,7 @@ atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_add_release(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_add_release(i, v);
|
||||
}
|
||||
@ -1278,6 +1365,7 @@ atomic_long_sub(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_sub_return(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_sub_return(i, v);
|
||||
}
|
||||
@ -1292,6 +1380,7 @@ atomic_long_sub_return_acquire(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_sub_return_release(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_sub_return_release(i, v);
|
||||
}
|
||||
@ -1306,6 +1395,7 @@ atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_sub(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_sub(i, v);
|
||||
}
|
||||
@ -1320,6 +1410,7 @@ atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_sub_release(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_sub_release(i, v);
|
||||
}
|
||||
@ -1341,6 +1432,7 @@ atomic_long_inc(atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_inc_return(atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_inc_return(v);
|
||||
}
|
||||
@ -1355,6 +1447,7 @@ atomic_long_inc_return_acquire(atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_inc_return_release(atomic_long_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_inc_return_release(v);
|
||||
}
|
||||
@ -1369,6 +1462,7 @@ atomic_long_inc_return_relaxed(atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_inc(atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_inc(v);
|
||||
}
|
||||
@ -1383,6 +1477,7 @@ atomic_long_fetch_inc_acquire(atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_inc_release(atomic_long_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_inc_release(v);
|
||||
}
|
||||
@ -1404,6 +1499,7 @@ atomic_long_dec(atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_dec_return(atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_dec_return(v);
|
||||
}
|
||||
@ -1418,6 +1514,7 @@ atomic_long_dec_return_acquire(atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_dec_return_release(atomic_long_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_dec_return_release(v);
|
||||
}
|
||||
@ -1432,6 +1529,7 @@ atomic_long_dec_return_relaxed(atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_dec(atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_dec(v);
|
||||
}
|
||||
@ -1446,6 +1544,7 @@ atomic_long_fetch_dec_acquire(atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_dec_release(atomic_long_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_dec_release(v);
|
||||
}
|
||||
@ -1467,6 +1566,7 @@ atomic_long_and(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_and(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_and(i, v);
|
||||
}
|
||||
@ -1481,6 +1581,7 @@ atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_and_release(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_and_release(i, v);
|
||||
}
|
||||
@ -1502,6 +1603,7 @@ atomic_long_andnot(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_andnot(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_andnot(i, v);
|
||||
}
|
||||
@ -1516,6 +1618,7 @@ atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_andnot_release(i, v);
|
||||
}
|
||||
@ -1537,6 +1640,7 @@ atomic_long_or(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_or(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_or(i, v);
|
||||
}
|
||||
@ -1551,6 +1655,7 @@ atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_or_release(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_or_release(i, v);
|
||||
}
|
||||
@ -1572,6 +1677,7 @@ atomic_long_xor(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_xor(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_xor(i, v);
|
||||
}
|
||||
@ -1586,6 +1692,7 @@ atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_xor_release(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_xor_release(i, v);
|
||||
}
|
||||
@ -1600,6 +1707,7 @@ atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_xchg(atomic_long_t *v, long i)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_xchg(v, i);
|
||||
}
|
||||
@ -1614,6 +1722,7 @@ atomic_long_xchg_acquire(atomic_long_t *v, long i)
|
||||
static __always_inline long
|
||||
atomic_long_xchg_release(atomic_long_t *v, long i)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_xchg_release(v, i);
|
||||
}
|
||||
@ -1628,6 +1737,7 @@ atomic_long_xchg_relaxed(atomic_long_t *v, long i)
|
||||
static __always_inline long
|
||||
atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_cmpxchg(v, old, new);
|
||||
}
|
||||
@ -1642,6 +1752,7 @@ atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
|
||||
static __always_inline long
|
||||
atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_cmpxchg_release(v, old, new);
|
||||
}
|
||||
@ -1656,6 +1767,7 @@ atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
|
||||
static __always_inline bool
|
||||
atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
instrument_atomic_read_write(old, sizeof(*old));
|
||||
return arch_atomic_long_try_cmpxchg(v, old, new);
|
||||
@ -1672,6 +1784,7 @@ atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
|
||||
static __always_inline bool
|
||||
atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
|
||||
{
|
||||
kcsan_release();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
instrument_atomic_read_write(old, sizeof(*old));
|
||||
return arch_atomic_long_try_cmpxchg_release(v, old, new);
|
||||
@ -1688,6 +1801,7 @@ atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
|
||||
static __always_inline bool
|
||||
atomic_long_sub_and_test(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_sub_and_test(i, v);
|
||||
}
|
||||
@ -1695,6 +1809,7 @@ atomic_long_sub_and_test(long i, atomic_long_t *v)
|
||||
static __always_inline bool
|
||||
atomic_long_dec_and_test(atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_dec_and_test(v);
|
||||
}
|
||||
@ -1702,6 +1817,7 @@ atomic_long_dec_and_test(atomic_long_t *v)
|
||||
static __always_inline bool
|
||||
atomic_long_inc_and_test(atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_inc_and_test(v);
|
||||
}
|
||||
@ -1709,6 +1825,7 @@ atomic_long_inc_and_test(atomic_long_t *v)
|
||||
static __always_inline bool
|
||||
atomic_long_add_negative(long i, atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_add_negative(i, v);
|
||||
}
|
||||
@ -1716,6 +1833,7 @@ atomic_long_add_negative(long i, atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_fetch_add_unless(v, a, u);
|
||||
}
|
||||
@ -1723,6 +1841,7 @@ atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
|
||||
static __always_inline bool
|
||||
atomic_long_add_unless(atomic_long_t *v, long a, long u)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_add_unless(v, a, u);
|
||||
}
|
||||
@ -1730,6 +1849,7 @@ atomic_long_add_unless(atomic_long_t *v, long a, long u)
|
||||
static __always_inline bool
|
||||
atomic_long_inc_not_zero(atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_inc_not_zero(v);
|
||||
}
|
||||
@ -1737,6 +1857,7 @@ atomic_long_inc_not_zero(atomic_long_t *v)
|
||||
static __always_inline bool
|
||||
atomic_long_inc_unless_negative(atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_inc_unless_negative(v);
|
||||
}
|
||||
@ -1744,6 +1865,7 @@ atomic_long_inc_unless_negative(atomic_long_t *v)
|
||||
static __always_inline bool
|
||||
atomic_long_dec_unless_positive(atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_dec_unless_positive(v);
|
||||
}
|
||||
@ -1751,6 +1873,7 @@ atomic_long_dec_unless_positive(atomic_long_t *v)
|
||||
static __always_inline long
|
||||
atomic_long_dec_if_positive(atomic_long_t *v)
|
||||
{
|
||||
kcsan_mb();
|
||||
instrument_atomic_read_write(v, sizeof(*v));
|
||||
return arch_atomic_long_dec_if_positive(v);
|
||||
}
|
||||
@ -1758,6 +1881,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
|
||||
#define xchg(ptr, ...) \
|
||||
({ \
|
||||
typeof(ptr) __ai_ptr = (ptr); \
|
||||
kcsan_mb(); \
|
||||
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
|
||||
arch_xchg(__ai_ptr, __VA_ARGS__); \
|
||||
})
|
||||
@ -1772,6 +1896,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
|
||||
#define xchg_release(ptr, ...) \
|
||||
({ \
|
||||
typeof(ptr) __ai_ptr = (ptr); \
|
||||
kcsan_release(); \
|
||||
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
|
||||
arch_xchg_release(__ai_ptr, __VA_ARGS__); \
|
||||
})
|
||||
@ -1786,6 +1911,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
|
||||
#define cmpxchg(ptr, ...) \
|
||||
({ \
|
||||
typeof(ptr) __ai_ptr = (ptr); \
|
||||
kcsan_mb(); \
|
||||
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
|
||||
arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
|
||||
})
|
||||
@ -1800,6 +1926,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
|
||||
#define cmpxchg_release(ptr, ...) \
|
||||
({ \
|
||||
typeof(ptr) __ai_ptr = (ptr); \
|
||||
kcsan_release(); \
|
||||
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
|
||||
arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
|
||||
})
|
||||
@ -1814,6 +1941,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
|
||||
#define cmpxchg64(ptr, ...) \
|
||||
({ \
|
||||
typeof(ptr) __ai_ptr = (ptr); \
|
||||
kcsan_mb(); \
|
||||
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
|
||||
arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
|
||||
})
|
||||
@ -1828,6 +1956,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
|
||||
#define cmpxchg64_release(ptr, ...) \
|
||||
({ \
|
||||
typeof(ptr) __ai_ptr = (ptr); \
|
||||
kcsan_release(); \
|
||||
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
|
||||
arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
|
||||
})
|
||||
@ -1843,6 +1972,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
|
||||
({ \
|
||||
typeof(ptr) __ai_ptr = (ptr); \
|
||||
typeof(oldp) __ai_oldp = (oldp); \
|
||||
kcsan_mb(); \
|
||||
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
|
||||
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
|
||||
arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
|
||||
@ -1861,6 +1991,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
|
||||
({ \
|
||||
typeof(ptr) __ai_ptr = (ptr); \
|
||||
typeof(oldp) __ai_oldp = (oldp); \
|
||||
kcsan_release(); \
|
||||
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
|
||||
instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
|
||||
arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
|
||||
@ -1892,6 +2023,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
|
||||
#define sync_cmpxchg(ptr, ...) \
|
||||
({ \
|
||||
typeof(ptr) __ai_ptr = (ptr); \
|
||||
kcsan_mb(); \
|
||||
instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
|
||||
arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
|
||||
})
|
||||
@ -1899,6 +2031,7 @@ atomic_long_dec_if_positive(atomic_long_t *v)
|
||||
#define cmpxchg_double(ptr, ...) \
|
||||
({ \
|
||||
typeof(ptr) __ai_ptr = (ptr); \
|
||||
kcsan_mb(); \
|
||||
instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
|
||||
arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
|
||||
})
|
||||
@@ -1912,4 +2045,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 })
 
 #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// 2a9553f0a9d5619f19151092df5cabbbf16ce835
+// 87c974b93032afd42143613434d1a7788fa598f9
@@ -308,6 +308,24 @@
 # define __compiletime_warning(msg)
 #endif
 
+/*
+ * Optional: only supported since clang >= 14.0
+ *
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#disable-sanitizer-instrumentation
+ *
+ * disable_sanitizer_instrumentation is not always similar to
+ * no_sanitize((<sanitizer-name>)): the latter may still let specific sanitizers
+ * insert code into functions to prevent false positives. Unlike that,
+ * disable_sanitizer_instrumentation prevents all kinds of instrumentation to
+ * functions with the attribute.
+ */
+#if __has_attribute(disable_sanitizer_instrumentation)
+# define __disable_sanitizer_instrumentation \
+	 __attribute__((disable_sanitizer_instrumentation))
+#else
+# define __disable_sanitizer_instrumentation
+#endif
+
 /*
  * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
  * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
@@ -198,9 +198,20 @@ struct ftrace_likely_data {
# define __no_kasan_or_inline __always_inline
#endif

#define __no_kcsan __no_sanitize_thread
#ifdef __SANITIZE_THREAD__
/*
 * Clang still emits instrumentation for __tsan_func_{entry,exit}() and builtin
 * atomics even with __no_sanitize_thread (to avoid false positives in userspace
 * ThreadSanitizer). The kernel's requirements are stricter and we really do not
 * want any instrumentation with __no_kcsan.
 *
 * Therefore we add __disable_sanitizer_instrumentation where available to
 * disable all instrumentation. See Kconfig.kcsan where this is mandatory.
 */
# define __no_kcsan __no_sanitize_thread __disable_sanitizer_instrumentation
# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
#else
# define __no_kcsan
#endif

#ifndef __no_sanitize_or_inline
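Note (illustrative, not part of the diff): with the definitions above, annotating a function __no_kcsan on a Clang 14+ build with __SANITIZE_THREAD__ picks up both __no_sanitize_thread and __disable_sanitizer_instrumentation, so neither access checks, builtin-atomic hooks, nor __tsan_func_{entry,exit}() calls are emitted for it. A minimal sketch; the helper name and body are hypothetical:

/* Hypothetical helper that must never be instrumented, e.g. because it may
 * run before the KCSAN runtime is ready to handle reports. */
static __no_kcsan unsigned long early_read_flags(const unsigned long *flags)
{
	/* No __tsan_* access checks and no entry/exit hooks are emitted here. */
	return READ_ONCE(*flags);
}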
@ -36,6 +36,36 @@
|
||||
*/
|
||||
void __kcsan_check_access(const volatile void *ptr, size_t size, int type);
|
||||
|
||||
/*
|
||||
* See definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
|
||||
* Note: The mappings are arbitrary, and do not reflect any real mappings of C11
|
||||
* memory orders to the LKMM memory orders and vice-versa!
|
||||
*/
|
||||
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb __ATOMIC_SEQ_CST
|
||||
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb __ATOMIC_ACQ_REL
|
||||
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb __ATOMIC_ACQUIRE
|
||||
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE_release __ATOMIC_RELEASE
|
||||
|
||||
/**
|
||||
* __kcsan_mb - full memory barrier instrumentation
|
||||
*/
|
||||
void __kcsan_mb(void);
|
||||
|
||||
/**
|
||||
* __kcsan_wmb - write memory barrier instrumentation
|
||||
*/
|
||||
void __kcsan_wmb(void);
|
||||
|
||||
/**
|
||||
* __kcsan_rmb - read memory barrier instrumentation
|
||||
*/
|
||||
void __kcsan_rmb(void);
|
||||
|
||||
/**
|
||||
* __kcsan_release - release barrier instrumentation
|
||||
*/
|
||||
void __kcsan_release(void);
|
||||
|
||||
/**
|
||||
* kcsan_disable_current - disable KCSAN for the current context
|
||||
*
|
||||
@ -99,7 +129,15 @@ void kcsan_set_access_mask(unsigned long mask);
|
||||
|
||||
/* Scoped access information. */
|
||||
struct kcsan_scoped_access {
|
||||
struct list_head list;
|
||||
union {
|
||||
struct list_head list; /* scoped_accesses list */
|
||||
/*
|
||||
* Not an entry in scoped_accesses list; stack depth from where
|
||||
* the access was initialized.
|
||||
*/
|
||||
int stack_depth;
|
||||
};
|
||||
|
||||
/* Access information. */
|
||||
const volatile void *ptr;
|
||||
size_t size;
|
||||
@ -151,6 +189,10 @@ void kcsan_end_scoped_access(struct kcsan_scoped_access *sa);
|
||||
static inline void __kcsan_check_access(const volatile void *ptr, size_t size,
|
||||
int type) { }
|
||||
|
||||
static inline void __kcsan_mb(void) { }
|
||||
static inline void __kcsan_wmb(void) { }
|
||||
static inline void __kcsan_rmb(void) { }
|
||||
static inline void __kcsan_release(void) { }
|
||||
static inline void kcsan_disable_current(void) { }
|
||||
static inline void kcsan_enable_current(void) { }
|
||||
static inline void kcsan_enable_current_nowarn(void) { }
|
||||
@@ -183,12 +225,47 @@ static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { }
 */
#define __kcsan_disable_current kcsan_disable_current
#define __kcsan_enable_current kcsan_enable_current_nowarn
#else
#else /* __SANITIZE_THREAD__ */
static inline void kcsan_check_access(const volatile void *ptr, size_t size,
				      int type) { }
static inline void __kcsan_enable_current(void) { }
static inline void __kcsan_disable_current(void) { }
#endif
#endif /* __SANITIZE_THREAD__ */

#if defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__SANITIZE_THREAD__)
/*
 * Normal barrier instrumentation is not done via explicit calls, but by mapping
 * to a repurposed __atomic_signal_fence(), which normally does not generate any
 * real instructions, but is still intercepted by fsanitize=thread. This means,
 * like any other compile-time instrumentation, barrier instrumentation can be
 * disabled with the __no_kcsan function attribute.
 *
 * Also see definition of __tsan_atomic_signal_fence() in kernel/kcsan/core.c.
 *
 * These are all macros, like <asm/barrier.h>, since some architectures use them
 * in non-static inline functions.
 */
#define __KCSAN_BARRIER_TO_SIGNAL_FENCE(name)					\
	do {									\
		barrier();							\
		__atomic_signal_fence(__KCSAN_BARRIER_TO_SIGNAL_FENCE_##name);	\
		barrier();							\
	} while (0)
#define kcsan_mb()	__KCSAN_BARRIER_TO_SIGNAL_FENCE(mb)
#define kcsan_wmb()	__KCSAN_BARRIER_TO_SIGNAL_FENCE(wmb)
#define kcsan_rmb()	__KCSAN_BARRIER_TO_SIGNAL_FENCE(rmb)
#define kcsan_release()	__KCSAN_BARRIER_TO_SIGNAL_FENCE(release)
#elif defined(CONFIG_KCSAN_WEAK_MEMORY) && defined(__KCSAN_INSTRUMENT_BARRIERS__)
#define kcsan_mb	__kcsan_mb
#define kcsan_wmb	__kcsan_wmb
#define kcsan_rmb	__kcsan_rmb
#define kcsan_release	__kcsan_release
#else /* CONFIG_KCSAN_WEAK_MEMORY && ... */
#define kcsan_mb()	do { } while (0)
#define kcsan_wmb()	do { } while (0)
#define kcsan_rmb()	do { } while (0)
#define kcsan_release()	do { } while (0)
#endif /* CONFIG_KCSAN_WEAK_MEMORY && ... */

/**
 * __kcsan_check_read - check regular read access for races
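Note (illustrative, not part of the diff): in an instrumented translation unit built with CONFIG_KCSAN_WEAK_MEMORY=y and -fsanitize=thread, the macros above mean a barrier produces no real instructions of its own; it becomes visible to KCSAN only through the repurposed signal fence. A rough sketch, where the function and variables are hypothetical:

static void example_publish(int *data, int *flag)	/* hypothetical */
{
	*data = 42;
	/*
	 * kcsan_mb() expands to approximately:
	 *   barrier();
	 *   __atomic_signal_fence(__ATOMIC_SEQ_CST);  <- __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb
	 *   barrier();
	 * The ThreadSanitizer pass turns the signal fence into a call to
	 * __tsan_atomic_signal_fence(__ATOMIC_SEQ_CST), which the KCSAN runtime
	 * maps back to __kcsan_mb() (see kernel/kcsan/core.c further below).
	 */
	kcsan_mb();
	*flag = 1;
}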
@ -21,6 +21,7 @@
|
||||
*/
|
||||
struct kcsan_ctx {
|
||||
int disable_count; /* disable counter */
|
||||
int disable_scoped; /* disable scoped access counter */
|
||||
int atomic_next; /* number of following atomic ops */
|
||||
|
||||
/*
|
||||
@ -48,8 +49,16 @@ struct kcsan_ctx {
|
||||
*/
|
||||
unsigned long access_mask;
|
||||
|
||||
/* List of scoped accesses. */
|
||||
/* List of scoped accesses; likely to be empty. */
|
||||
struct list_head scoped_accesses;
|
||||
|
||||
#ifdef CONFIG_KCSAN_WEAK_MEMORY
|
||||
/*
|
||||
* Scoped access for modeling access reordering to detect missing memory
|
||||
* barriers; only keep 1 to keep fast-path complexity manageable.
|
||||
*/
|
||||
struct kcsan_scoped_access reorder_access;
|
||||
#endif
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -1339,6 +1339,9 @@ struct task_struct {
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
struct irqtrace_events kcsan_save_irqtrace;
|
||||
#endif
|
||||
#ifdef CONFIG_KCSAN_WEAK_MEMORY
|
||||
int kcsan_stack_depth;
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_KUNIT)
|
||||
|
@ -171,7 +171,7 @@ do { \
|
||||
* Architectures that can implement ACQUIRE better need to take care.
|
||||
*/
|
||||
#ifndef smp_mb__after_spinlock
|
||||
#define smp_mb__after_spinlock() do { } while (0)
|
||||
#define smp_mb__after_spinlock() kcsan_mb()
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_DEBUG_SPINLOCK
|
||||
|
@ -182,11 +182,6 @@ struct task_struct init_task
|
||||
#endif
|
||||
#ifdef CONFIG_KCSAN
|
||||
.kcsan_ctx = {
|
||||
.disable_count = 0,
|
||||
.atomic_next = 0,
|
||||
.atomic_nest_count = 0,
|
||||
.in_flat_atomic = false,
|
||||
.access_mask = 0,
|
||||
.scoped_accesses = {LIST_POISON1, NULL},
|
||||
},
|
||||
#endif
|
||||
|
@ -12,6 +12,8 @@ CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \
|
||||
-fno-stack-protector -DDISABLE_BRANCH_PROFILING
|
||||
|
||||
obj-y := core.o debugfs.o report.o
|
||||
|
||||
KCSAN_INSTRUMENT_BARRIERS_selftest.o := y
|
||||
obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
|
||||
|
||||
CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer
|
||||
|
@ -40,15 +40,17 @@ module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
|
||||
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
|
||||
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);
|
||||
|
||||
#ifdef CONFIG_KCSAN_WEAK_MEMORY
|
||||
static bool kcsan_weak_memory = true;
|
||||
module_param_named(weak_memory, kcsan_weak_memory, bool, 0644);
|
||||
#else
|
||||
#define kcsan_weak_memory false
|
||||
#endif
|
||||
|
||||
bool kcsan_enabled;
|
||||
|
||||
/* Per-CPU kcsan_ctx for interrupts */
|
||||
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
|
||||
.disable_count = 0,
|
||||
.atomic_next = 0,
|
||||
.atomic_nest_count = 0,
|
||||
.in_flat_atomic = false,
|
||||
.access_mask = 0,
|
||||
.scoped_accesses = {LIST_POISON1, NULL},
|
||||
};
|
||||
|
||||
@ -209,15 +211,17 @@ check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
|
||||
static noinline void kcsan_check_scoped_accesses(void)
|
||||
{
|
||||
struct kcsan_ctx *ctx = get_ctx();
|
||||
struct list_head *prev_save = ctx->scoped_accesses.prev;
|
||||
struct kcsan_scoped_access *scoped_access;
|
||||
|
||||
ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */
|
||||
if (ctx->disable_scoped)
|
||||
return;
|
||||
|
||||
ctx->disable_scoped++;
|
||||
list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
|
||||
check_access(scoped_access->ptr, scoped_access->size,
|
||||
scoped_access->type, scoped_access->ip);
|
||||
}
|
||||
ctx->scoped_accesses.prev = prev_save;
|
||||
ctx->disable_scoped--;
|
||||
}
|
||||
|
||||
/* Rules for generic atomic accesses. Called from fast-path. */
|
||||
@ -325,6 +329,21 @@ static void delay_access(int type)
|
||||
udelay(delay);
|
||||
}
|
||||
|
||||
/*
|
||||
* Reads the instrumented memory for value change detection; value change
|
||||
* detection is currently done for accesses up to a size of 8 bytes.
|
||||
*/
|
||||
static __always_inline u64 read_instrumented_memory(const volatile void *ptr, size_t size)
|
||||
{
|
||||
switch (size) {
|
||||
case 1: return READ_ONCE(*(const u8 *)ptr);
|
||||
case 2: return READ_ONCE(*(const u16 *)ptr);
|
||||
case 4: return READ_ONCE(*(const u32 *)ptr);
|
||||
case 8: return READ_ONCE(*(const u64 *)ptr);
|
||||
default: return 0; /* Ignore; we do not diff the values. */
|
||||
}
|
||||
}
|
||||
|
||||
void kcsan_save_irqtrace(struct task_struct *task)
|
||||
{
|
||||
#ifdef CONFIG_TRACE_IRQFLAGS
|
||||
@ -339,6 +358,76 @@ void kcsan_restore_irqtrace(struct task_struct *task)
|
||||
#endif
|
||||
}
|
||||
|
||||
static __always_inline int get_kcsan_stack_depth(void)
|
||||
{
|
||||
#ifdef CONFIG_KCSAN_WEAK_MEMORY
|
||||
return current->kcsan_stack_depth;
|
||||
#else
|
||||
BUILD_BUG();
|
||||
return 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static __always_inline void add_kcsan_stack_depth(int val)
|
||||
{
|
||||
#ifdef CONFIG_KCSAN_WEAK_MEMORY
|
||||
current->kcsan_stack_depth += val;
|
||||
#else
|
||||
BUILD_BUG();
|
||||
#endif
|
||||
}
|
||||
|
||||
static __always_inline struct kcsan_scoped_access *get_reorder_access(struct kcsan_ctx *ctx)
|
||||
{
|
||||
#ifdef CONFIG_KCSAN_WEAK_MEMORY
|
||||
return ctx->disable_scoped ? NULL : &ctx->reorder_access;
|
||||
#else
|
||||
return NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
static __always_inline bool
|
||||
find_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size,
|
||||
int type, unsigned long ip)
|
||||
{
|
||||
struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx);
|
||||
|
||||
if (!reorder_access)
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Note: If accesses are repeated while reorder_access is identical,
|
||||
* never matches the new access, because !(type & KCSAN_ACCESS_SCOPED).
|
||||
*/
|
||||
return reorder_access->ptr == ptr && reorder_access->size == size &&
|
||||
reorder_access->type == type && reorder_access->ip == ip;
|
||||
}
|
||||
|
||||
static inline void
|
||||
set_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size,
|
||||
int type, unsigned long ip)
|
||||
{
|
||||
struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx);
|
||||
|
||||
if (!reorder_access || !kcsan_weak_memory)
|
||||
return;
|
||||
|
||||
/*
|
||||
* To avoid nested interrupts or scheduler (which share kcsan_ctx)
|
||||
* reading an inconsistent reorder_access, ensure that the below has
|
||||
* exclusive access to reorder_access by disallowing concurrent use.
|
||||
*/
|
||||
ctx->disable_scoped++;
|
||||
barrier();
|
||||
reorder_access->ptr = ptr;
|
||||
reorder_access->size = size;
|
||||
reorder_access->type = type | KCSAN_ACCESS_SCOPED;
|
||||
reorder_access->ip = ip;
|
||||
reorder_access->stack_depth = get_kcsan_stack_depth();
|
||||
barrier();
|
||||
ctx->disable_scoped--;
|
||||
}
|
||||
|
||||
/*
|
||||
* Pull everything together: check_access() below contains the performance
|
||||
* critical operations; the fast-path (including check_access) functions should
|
||||
@ -377,8 +466,10 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
|
||||
* The access_mask check relies on value-change comparison. To avoid
|
||||
* reporting a race where e.g. the writer set up the watchpoint, but the
|
||||
* reader has access_mask!=0, we have to ignore the found watchpoint.
|
||||
*
|
||||
* reorder_access is never created from an access with access_mask set.
|
||||
*/
|
||||
if (ctx->access_mask)
|
||||
if (ctx->access_mask && !find_reorder_access(ctx, ptr, size, type, ip))
|
||||
return;
|
||||
|
||||
/*
|
||||
@ -428,11 +519,13 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned
|
||||
const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
|
||||
atomic_long_t *watchpoint;
|
||||
u64 old, new, diff;
|
||||
unsigned long access_mask;
|
||||
enum kcsan_value_change value_change = KCSAN_VALUE_CHANGE_MAYBE;
|
||||
bool interrupt_watcher = kcsan_interrupt_watcher;
|
||||
unsigned long ua_flags = user_access_save();
|
||||
struct kcsan_ctx *ctx = get_ctx();
|
||||
unsigned long access_mask = ctx->access_mask;
|
||||
unsigned long irq_flags = 0;
|
||||
bool is_reorder_access;
|
||||
|
||||
/*
|
||||
* Always reset kcsan_skip counter in slow-path to avoid underflow; see
|
||||
@ -455,13 +548,33 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* The local CPU cannot observe reordering of its own accesses, and
|
||||
* therefore we need to take care of 2 cases to avoid false positives:
|
||||
*
|
||||
* 1. Races of the reordered access with interrupts. To avoid, if
|
||||
* the current access is reorder_access, disable interrupts.
|
||||
* 2. Avoid races of scoped accesses from nested interrupts (below).
|
||||
*/
|
||||
is_reorder_access = find_reorder_access(ctx, ptr, size, type, ip);
|
||||
if (is_reorder_access)
|
||||
interrupt_watcher = false;
|
||||
/*
|
||||
* Avoid races of scoped accesses from nested interrupts (or scheduler).
|
||||
* Assume setting up a watchpoint for a non-scoped (normal) access that
|
||||
* also conflicts with a current scoped access. In a nested interrupt,
|
||||
* which shares the context, it would check a conflicting scoped access.
|
||||
* To avoid, disable scoped access checking.
|
||||
*/
|
||||
ctx->disable_scoped++;
|
||||
|
||||
/*
|
||||
* Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
|
||||
* runtime is entered for every memory access, and potentially useful
|
||||
* information is lost if dirtied by KCSAN.
|
||||
*/
|
||||
kcsan_save_irqtrace(current);
|
||||
if (!kcsan_interrupt_watcher)
|
||||
if (!interrupt_watcher)
|
||||
local_irq_save(irq_flags);
|
||||
|
||||
watchpoint = insert_watchpoint((unsigned long)ptr, size, is_write);
|
||||
@ -482,23 +595,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned
|
||||
* Read the current value, to later check and infer a race if the data
|
||||
* was modified via a non-instrumented access, e.g. from a device.
|
||||
*/
|
||||
old = 0;
|
||||
switch (size) {
|
||||
case 1:
|
||||
old = READ_ONCE(*(const u8 *)ptr);
|
||||
break;
|
||||
case 2:
|
||||
old = READ_ONCE(*(const u16 *)ptr);
|
||||
break;
|
||||
case 4:
|
||||
old = READ_ONCE(*(const u32 *)ptr);
|
||||
break;
|
||||
case 8:
|
||||
old = READ_ONCE(*(const u64 *)ptr);
|
||||
break;
|
||||
default:
|
||||
break; /* ignore; we do not diff the values */
|
||||
}
|
||||
old = is_reorder_access ? 0 : read_instrumented_memory(ptr, size);
|
||||
|
||||
/*
|
||||
* Delay this thread, to increase probability of observing a racy
|
||||
@ -510,23 +607,16 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned
|
||||
* Re-read value, and check if it is as expected; if not, we infer a
|
||||
* racy access.
|
||||
*/
|
||||
access_mask = ctx->access_mask;
|
||||
new = 0;
|
||||
switch (size) {
|
||||
case 1:
|
||||
new = READ_ONCE(*(const u8 *)ptr);
|
||||
break;
|
||||
case 2:
|
||||
new = READ_ONCE(*(const u16 *)ptr);
|
||||
break;
|
||||
case 4:
|
||||
new = READ_ONCE(*(const u32 *)ptr);
|
||||
break;
|
||||
case 8:
|
||||
new = READ_ONCE(*(const u64 *)ptr);
|
||||
break;
|
||||
default:
|
||||
break; /* ignore; we do not diff the values */
|
||||
if (!is_reorder_access) {
|
||||
new = read_instrumented_memory(ptr, size);
|
||||
} else {
|
||||
/*
|
||||
* Reordered accesses cannot be used for value change detection,
|
||||
* because the memory location may no longer be accessible and
|
||||
* could result in a fault.
|
||||
*/
|
||||
new = 0;
|
||||
access_mask = 0;
|
||||
}
|
||||
|
||||
diff = old ^ new;
|
||||
@ -596,10 +686,20 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned
|
||||
*/
|
||||
remove_watchpoint(watchpoint);
|
||||
atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
|
||||
|
||||
out_unlock:
|
||||
if (!kcsan_interrupt_watcher)
|
||||
if (!interrupt_watcher)
|
||||
local_irq_restore(irq_flags);
|
||||
kcsan_restore_irqtrace(current);
|
||||
ctx->disable_scoped--;
|
||||
|
||||
/*
|
||||
* Reordered accesses cannot be used for value change detection,
|
||||
* therefore never consider for reordering if access_mask is set.
|
||||
* ASSERT_EXCLUSIVE are not real accesses, ignore them as well.
|
||||
*/
|
||||
if (!access_mask && !is_assert)
|
||||
set_reorder_access(ctx, ptr, size, type, ip);
|
||||
out:
|
||||
user_access_restore(ua_flags);
|
||||
}
|
||||
@ -607,7 +707,6 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned
|
||||
static __always_inline void
|
||||
check_access(const volatile void *ptr, size_t size, int type, unsigned long ip)
|
||||
{
|
||||
const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
|
||||
atomic_long_t *watchpoint;
|
||||
long encoded_watchpoint;
|
||||
|
||||
@ -618,12 +717,14 @@ check_access(const volatile void *ptr, size_t size, int type, unsigned long ip)
|
||||
if (unlikely(size == 0))
|
||||
return;
|
||||
|
||||
again:
|
||||
/*
|
||||
* Avoid user_access_save in fast-path: find_watchpoint is safe without
|
||||
* user_access_save, as the address that ptr points to is only used to
|
||||
* check if a watchpoint exists; ptr is never dereferenced.
|
||||
*/
|
||||
watchpoint = find_watchpoint((unsigned long)ptr, size, !is_write,
|
||||
watchpoint = find_watchpoint((unsigned long)ptr, size,
|
||||
!(type & KCSAN_ACCESS_WRITE),
|
||||
&encoded_watchpoint);
|
||||
/*
|
||||
* It is safe to check kcsan_is_enabled() after find_watchpoint in the
|
||||
@ -637,9 +738,42 @@ check_access(const volatile void *ptr, size_t size, int type, unsigned long ip)
|
||||
else {
|
||||
struct kcsan_ctx *ctx = get_ctx(); /* Call only once in fast-path. */
|
||||
|
||||
if (unlikely(should_watch(ctx, ptr, size, type)))
|
||||
if (unlikely(should_watch(ctx, ptr, size, type))) {
|
||||
kcsan_setup_watchpoint(ptr, size, type, ip);
|
||||
else if (unlikely(ctx->scoped_accesses.prev))
|
||||
return;
|
||||
}
|
||||
|
||||
if (!(type & KCSAN_ACCESS_SCOPED)) {
|
||||
struct kcsan_scoped_access *reorder_access = get_reorder_access(ctx);
|
||||
|
||||
if (reorder_access) {
|
||||
/*
|
||||
* reorder_access check: simulates reordering of
|
||||
* the access after subsequent operations.
|
||||
*/
|
||||
ptr = reorder_access->ptr;
|
||||
type = reorder_access->type;
|
||||
ip = reorder_access->ip;
|
||||
/*
|
||||
* Upon a nested interrupt, this context's
|
||||
* reorder_access can be modified (shared ctx).
|
||||
* We know that upon return, reorder_access is
|
||||
* always invalidated by setting size to 0 via
|
||||
* __tsan_func_exit(). Therefore we must read
|
||||
* and check size after the other fields.
|
||||
*/
|
||||
barrier();
|
||||
size = READ_ONCE(reorder_access->size);
|
||||
if (size)
|
||||
goto again;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Always checked last, right before returning from runtime;
|
||||
* if reorder_access is valid, checked after it was checked.
|
||||
*/
|
||||
if (unlikely(ctx->scoped_accesses.prev))
|
||||
kcsan_check_scoped_accesses();
|
||||
}
|
||||
}
@@ -814,6 +948,22 @@ void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
}
EXPORT_SYMBOL(__kcsan_check_access);

#define DEFINE_MEMORY_BARRIER(name, order_before_cond)				\
	void __kcsan_##name(void)						\
	{									\
		struct kcsan_scoped_access *sa = get_reorder_access(get_ctx());	\
		if (!sa)							\
			return;							\
		if (order_before_cond)						\
			sa->size = 0;						\
	}									\
	EXPORT_SYMBOL(__kcsan_##name)

DEFINE_MEMORY_BARRIER(mb, true);
DEFINE_MEMORY_BARRIER(wmb, sa->type & (KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND));
DEFINE_MEMORY_BARRIER(rmb, !(sa->type & KCSAN_ACCESS_WRITE) || (sa->type & KCSAN_ACCESS_COMPOUND));
DEFINE_MEMORY_BARRIER(release, true);
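Note (illustrative, not part of the diff): the wmb instance above expands to roughly the following; the condition encodes which access types a write barrier orders, and an ordered reorder_access is invalidated by zeroing its size so it can no longer be "reordered" past the barrier:

/* Approximate expansion of DEFINE_MEMORY_BARRIER(wmb, ...). */
void __kcsan_wmb(void)
{
	struct kcsan_scoped_access *sa = get_reorder_access(get_ctx());

	if (!sa)
		return;
	/* A write barrier only orders write and read-write (compound) accesses. */
	if (sa->type & (KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND))
		sa->size = 0;
}
EXPORT_SYMBOL(__kcsan_wmb);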
/*
|
||||
* KCSAN uses the same instrumentation that is emitted by supported compilers
|
||||
* for ThreadSanitizer (TSAN).
@@ -926,19 +1076,56 @@ DEFINE_TSAN_VOLATILE_READ_WRITE(8);
DEFINE_TSAN_VOLATILE_READ_WRITE(16);

/*
 * The below are not required by KCSAN, but can still be emitted by the
 * compiler.
 * Function entry and exit are used to determine the validity of reorder_access.
 * Reordering of the access ends at the end of the function scope where the
 * access happened. This is done for two reasons:
 *
 * 1. Artificially limits the scope where missing barriers are detected.
 *    This minimizes false positives due to uninstrumented functions that
 *    contain the required barriers but were missed.
 *
 * 2. Simplifies generating the stack trace of the access.
 */
void __tsan_func_entry(void *call_pc);
void __tsan_func_entry(void *call_pc)
noinline void __tsan_func_entry(void *call_pc)
{
	if (!IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
		return;

	add_kcsan_stack_depth(1);
}
EXPORT_SYMBOL(__tsan_func_entry);

void __tsan_func_exit(void);
void __tsan_func_exit(void)
noinline void __tsan_func_exit(void)
{
	struct kcsan_scoped_access *reorder_access;

	if (!IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
		return;

	reorder_access = get_reorder_access(get_ctx());
	if (!reorder_access)
		goto out;

	if (get_kcsan_stack_depth() <= reorder_access->stack_depth) {
		/*
		 * Access check to catch cases where a write without a barrier
		 * (supposed release) was the last access in a function: because
		 * instrumentation is inserted before the real access, a data
		 * race due to the write giving up a critical section would only
		 * be caught if we do the conflicting access after.
		 */
		check_access(reorder_access->ptr, reorder_access->size,
			     reorder_access->type, reorder_access->ip);
		reorder_access->size = 0;
		reorder_access->stack_depth = INT_MIN;
	}
out:
	add_kcsan_stack_depth(-1);
}
EXPORT_SYMBOL(__tsan_func_exit);

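Note (illustrative, not part of the diff): the extra check in __tsan_func_exit() targets code where the store that gives up a critical section is the last access in the function and lacks release ordering. A hedged sketch of that bug class; names are hypothetical, and it mirrors the wrong_memorder test kernel added further below:

/* Hypothetical: 'flag' is meant to protect 'shared_data'. */
static int shared_data;

static void broken_unlock(long *flag)
{
	shared_data++;		/* critical-section access */
	WRITE_ONCE(*flag, 0);	/* should be smp_store_release(flag, 0) */
	/*
	 * With CONFIG_KCSAN_WEAK_MEMORY=y, the reorder_access set up for the
	 * shared_data access is not invalidated by the relaxed store, so the
	 * check at function exit can still report it racing with another
	 * thread that observes flag == 0 and enters the critical section.
	 */
}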
void __tsan_init(void);
|
||||
void __tsan_init(void)
|
||||
{
|
||||
@ -961,10 +1148,19 @@ EXPORT_SYMBOL(__tsan_init);
|
||||
* functions, whose job is to also execute the operation itself.
|
||||
*/
|
||||
|
||||
static __always_inline void kcsan_atomic_builtin_memorder(int memorder)
|
||||
{
|
||||
if (memorder == __ATOMIC_RELEASE ||
|
||||
memorder == __ATOMIC_SEQ_CST ||
|
||||
memorder == __ATOMIC_ACQ_REL)
|
||||
__kcsan_release();
|
||||
}
|
||||
|
||||
#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits) \
|
||||
u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder); \
|
||||
u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
|
||||
{ \
|
||||
kcsan_atomic_builtin_memorder(memorder); \
|
||||
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
|
||||
check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC, _RET_IP_); \
|
||||
} \
|
||||
@ -974,6 +1170,7 @@ EXPORT_SYMBOL(__tsan_init);
|
||||
void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
|
||||
void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
|
||||
{ \
|
||||
kcsan_atomic_builtin_memorder(memorder); \
|
||||
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
|
||||
check_access(ptr, bits / BITS_PER_BYTE, \
|
||||
KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC, _RET_IP_); \
|
||||
@ -986,6 +1183,7 @@ EXPORT_SYMBOL(__tsan_init);
|
||||
u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
|
||||
u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
|
||||
{ \
|
||||
kcsan_atomic_builtin_memorder(memorder); \
|
||||
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
|
||||
check_access(ptr, bits / BITS_PER_BYTE, \
|
||||
KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
|
||||
@ -1018,6 +1216,7 @@ EXPORT_SYMBOL(__tsan_init);
|
||||
int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
|
||||
u##bits val, int mo, int fail_mo) \
|
||||
{ \
|
||||
kcsan_atomic_builtin_memorder(mo); \
|
||||
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
|
||||
check_access(ptr, bits / BITS_PER_BYTE, \
|
||||
KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
|
||||
@ -1033,6 +1232,7 @@ EXPORT_SYMBOL(__tsan_init);
|
||||
u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
|
||||
int mo, int fail_mo) \
|
||||
{ \
|
||||
kcsan_atomic_builtin_memorder(mo); \
|
||||
if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
|
||||
check_access(ptr, bits / BITS_PER_BYTE, \
|
||||
KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
|
||||
@ -1064,10 +1264,47 @@ DEFINE_TSAN_ATOMIC_OPS(64);
|
||||
void __tsan_atomic_thread_fence(int memorder);
|
||||
void __tsan_atomic_thread_fence(int memorder)
|
||||
{
|
||||
kcsan_atomic_builtin_memorder(memorder);
|
||||
__atomic_thread_fence(memorder);
|
||||
}
|
||||
EXPORT_SYMBOL(__tsan_atomic_thread_fence);
|
||||
|
||||
/*
|
||||
* In instrumented files, we emit instrumentation for barriers by mapping the
|
||||
* kernel barriers to an __atomic_signal_fence(), which is interpreted specially
|
||||
* and otherwise has no relation to a real __atomic_signal_fence(). No known
|
||||
* kernel code uses __atomic_signal_fence().
|
||||
*
|
||||
* Since fsanitize=thread instrumentation handles __atomic_signal_fence(), which
|
||||
* are turned into calls to __tsan_atomic_signal_fence(), such instrumentation
|
||||
* can be disabled via the __no_kcsan function attribute (vs. an explicit call
|
||||
* which could not). When __no_kcsan is requested, __atomic_signal_fence()
|
||||
* generates no code.
|
||||
*
|
||||
* Note: The result of using __atomic_signal_fence() with KCSAN enabled is
|
||||
* potentially limiting the compiler's ability to reorder operations; however,
|
||||
* if barriers were instrumented with explicit calls (without LTO), the compiler
|
||||
* couldn't optimize much anyway. The result of a hypothetical architecture
|
||||
* using __atomic_signal_fence() in normal code would be KCSAN false negatives.
|
||||
*/
|
||||
void __tsan_atomic_signal_fence(int memorder);
|
||||
void __tsan_atomic_signal_fence(int memorder) { }
|
||||
noinline void __tsan_atomic_signal_fence(int memorder)
|
||||
{
|
||||
switch (memorder) {
|
||||
case __KCSAN_BARRIER_TO_SIGNAL_FENCE_mb:
|
||||
__kcsan_mb();
|
||||
break;
|
||||
case __KCSAN_BARRIER_TO_SIGNAL_FENCE_wmb:
|
||||
__kcsan_wmb();
|
||||
break;
|
||||
case __KCSAN_BARRIER_TO_SIGNAL_FENCE_rmb:
|
||||
__kcsan_rmb();
|
||||
break;
|
||||
case __KCSAN_BARRIER_TO_SIGNAL_FENCE_release:
|
||||
__kcsan_release();
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(__tsan_atomic_signal_fence);
|
||||
|
@ -16,9 +16,12 @@
|
||||
#define pr_fmt(fmt) "kcsan_test: " fmt
|
||||
|
||||
#include <kunit/test.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <linux/kcsan-checks.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/seqlock.h>
|
||||
#include <linux/spinlock.h>
|
||||
@ -151,7 +154,7 @@ struct expect_report {
|
||||
|
||||
/* Check observed report matches information in @r. */
|
||||
__no_kcsan
|
||||
static bool report_matches(const struct expect_report *r)
|
||||
static bool __report_matches(const struct expect_report *r)
|
||||
{
|
||||
const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT;
|
||||
bool ret = false;
|
||||
@ -213,9 +216,9 @@ static bool report_matches(const struct expect_report *r)
|
||||
const bool is_atomic = (ty & KCSAN_ACCESS_ATOMIC);
|
||||
const bool is_scoped = (ty & KCSAN_ACCESS_SCOPED);
|
||||
const char *const access_type_aux =
|
||||
(is_atomic && is_scoped) ? " (marked, scoped)"
|
||||
(is_atomic && is_scoped) ? " (marked, reordered)"
|
||||
: (is_atomic ? " (marked)"
|
||||
: (is_scoped ? " (scoped)" : ""));
|
||||
: (is_scoped ? " (reordered)" : ""));
|
||||
|
||||
if (i == 1) {
|
||||
/* Access 2 */
|
||||
@ -253,6 +256,40 @@ static bool report_matches(const struct expect_report *r)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static __always_inline const struct expect_report *
|
||||
__report_set_scoped(struct expect_report *r, int accesses)
|
||||
{
|
||||
BUILD_BUG_ON(accesses > 3);
|
||||
|
||||
if (accesses & 1)
|
||||
r->access[0].type |= KCSAN_ACCESS_SCOPED;
|
||||
else
|
||||
r->access[0].type &= ~KCSAN_ACCESS_SCOPED;
|
||||
|
||||
if (accesses & 2)
|
||||
r->access[1].type |= KCSAN_ACCESS_SCOPED;
|
||||
else
|
||||
r->access[1].type &= ~KCSAN_ACCESS_SCOPED;
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
__no_kcsan
|
||||
static bool report_matches_any_reordered(struct expect_report *r)
|
||||
{
|
||||
return __report_matches(__report_set_scoped(r, 0)) ||
|
||||
__report_matches(__report_set_scoped(r, 1)) ||
|
||||
__report_matches(__report_set_scoped(r, 2)) ||
|
||||
__report_matches(__report_set_scoped(r, 3));
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KCSAN_WEAK_MEMORY
|
||||
/* Due to reordering accesses, any access may appear as "(reordered)". */
|
||||
#define report_matches report_matches_any_reordered
|
||||
#else
|
||||
#define report_matches __report_matches
|
||||
#endif
|
||||
|
||||
/* ===== Test kernels ===== */
|
||||
|
||||
static long test_sink;
|
||||
@ -263,6 +300,8 @@ static struct {
|
||||
long val[8];
|
||||
} test_struct;
|
||||
static DEFINE_SEQLOCK(test_seqlock);
|
||||
static DEFINE_SPINLOCK(test_spinlock);
|
||||
static DEFINE_MUTEX(test_mutex);
|
||||
|
||||
/*
|
||||
* Helper to avoid compiler optimizing out reads, and to generate source values
|
||||
@ -271,6 +310,16 @@ static DEFINE_SEQLOCK(test_seqlock);
|
||||
__no_kcsan
|
||||
static noinline void sink_value(long v) { WRITE_ONCE(test_sink, v); }
|
||||
|
||||
/*
|
||||
* Generates a delay and some accesses that enter the runtime but do not produce
|
||||
* data races.
|
||||
*/
|
||||
static noinline void test_delay(int iter)
|
||||
{
|
||||
while (iter--)
|
||||
sink_value(READ_ONCE(test_sink));
|
||||
}
|
||||
|
||||
static noinline void test_kernel_read(void) { sink_value(test_var); }
|
||||
|
||||
static noinline void test_kernel_write(void)
|
||||
@ -432,19 +481,239 @@ static noinline void test_kernel_xor_1bit(void)
|
||||
kcsan_nestable_atomic_end();
|
||||
}

#define TEST_KERNEL_LOCKED(name, acquire, release)		\
	static noinline void test_kernel_##name(void)		\
	{							\
		long *flag = &test_struct.val[0];		\
		long v = 0;					\
		if (!(acquire))					\
			return;					\
		while (v++ < 100) {				\
			test_var++;				\
			barrier();				\
		}						\
		release;					\
		test_delay(10);					\
	}

TEST_KERNEL_LOCKED(with_memorder,
		   cmpxchg_acquire(flag, 0, 1) == 0,
		   smp_store_release(flag, 0));
TEST_KERNEL_LOCKED(wrong_memorder,
		   cmpxchg_relaxed(flag, 0, 1) == 0,
		   WRITE_ONCE(*flag, 0));
TEST_KERNEL_LOCKED(atomic_builtin_with_memorder,
		   __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED),
		   __atomic_store_n(flag, 0, __ATOMIC_RELEASE));
TEST_KERNEL_LOCKED(atomic_builtin_wrong_memorder,
		   __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED),
		   __atomic_store_n(flag, 0, __ATOMIC_RELAXED));
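Note (illustrative, not part of the diff): expanding the wrong_memorder instance makes the intent of these test kernels clearer. The critical section is entered with a relaxed cmpxchg and left with a plain WRITE_ONCE, i.e. without the acquire/release ordering of the with_memorder variant, which is what the missing-barrier test cases below expect KCSAN to report when CONFIG_KCSAN_WEAK_MEMORY=y:

/* Approximate expansion of TEST_KERNEL_LOCKED(wrong_memorder, ...). */
static noinline void test_kernel_wrong_memorder(void)
{
	long *flag = &test_struct.val[0];
	long v = 0;

	if (!(cmpxchg_relaxed(flag, 0, 1) == 0))	/* "acquire" without acquire ordering */
		return;
	while (v++ < 100) {
		test_var++;				/* critical-section accesses */
		barrier();
	}
	WRITE_ONCE(*flag, 0);				/* "release" without release ordering */
	test_delay(10);
}
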
/* ===== Test cases ===== */
|
||||
|
||||
/*
|
||||
* Tests that various barriers have the expected effect on internal state. Not
|
||||
* exhaustive on atomic_t operations. Unlike the selftest, also checks for
|
||||
* too-strict barrier instrumentation; these can be tolerated, because it does
|
||||
* not cause false positives, but at least we should be aware of such cases.
|
||||
*/
|
||||
static void test_barrier_nothreads(struct kunit *test)
|
||||
{
|
||||
#ifdef CONFIG_KCSAN_WEAK_MEMORY
|
||||
struct kcsan_scoped_access *reorder_access = ¤t->kcsan_ctx.reorder_access;
|
||||
#else
|
||||
struct kcsan_scoped_access *reorder_access = NULL;
|
||||
#endif
|
||||
arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
|
||||
atomic_t dummy;
|
||||
|
||||
KCSAN_TEST_REQUIRES(test, reorder_access != NULL);
|
||||
KCSAN_TEST_REQUIRES(test, IS_ENABLED(CONFIG_SMP));
|
||||
|
||||
#define __KCSAN_EXPECT_BARRIER(access_type, barrier, order_before, name) \
|
||||
do { \
|
||||
reorder_access->type = (access_type) | KCSAN_ACCESS_SCOPED; \
|
||||
reorder_access->size = sizeof(test_var); \
|
||||
barrier; \
|
||||
KUNIT_EXPECT_EQ_MSG(test, reorder_access->size, \
|
||||
order_before ? 0 : sizeof(test_var), \
|
||||
"improperly instrumented type=(" #access_type "): " name); \
|
||||
} while (0)
|
||||
#define KCSAN_EXPECT_READ_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(0, b, o, #b)
|
||||
#define KCSAN_EXPECT_WRITE_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_WRITE, b, o, #b)
|
||||
#define KCSAN_EXPECT_RW_BARRIER(b, o) __KCSAN_EXPECT_BARRIER(KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE, b, o, #b)
|
||||
|
||||
/*
|
||||
* Lockdep initialization can strengthen certain locking operations due
|
||||
* to calling into instrumented files; "warm up" our locks.
|
||||
*/
|
||||
spin_lock(&test_spinlock);
|
||||
spin_unlock(&test_spinlock);
|
||||
mutex_lock(&test_mutex);
|
||||
mutex_unlock(&test_mutex);
|
||||
|
||||
/* Force creating a valid entry in reorder_access first. */
|
||||
test_var = 0;
|
||||
while (test_var++ < 1000000 && reorder_access->size != sizeof(test_var))
|
||||
__kcsan_check_read(&test_var, sizeof(test_var));
|
||||
KUNIT_ASSERT_EQ(test, reorder_access->size, sizeof(test_var));
|
||||
|
||||
kcsan_nestable_atomic_begin(); /* No watchpoints in called functions. */
|
||||
|
||||
KCSAN_EXPECT_READ_BARRIER(mb(), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(wmb(), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(rmb(), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(smp_mb(), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(smp_wmb(), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(smp_rmb(), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(dma_wmb(), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(dma_rmb(), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(smp_mb__before_atomic(), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(smp_mb__after_atomic(), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(smp_mb__after_spinlock(), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(smp_store_mb(test_var, 0), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(smp_load_acquire(&test_var), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(smp_store_release(&test_var, 0), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(xchg(&test_var, 0), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(xchg_release(&test_var, 0), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(xchg_relaxed(&test_var, 0), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(cmpxchg(&test_var, 0, 0), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(cmpxchg_release(&test_var, 0, 0), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(atomic_read(&dummy), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(atomic_read_acquire(&dummy), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(atomic_set(&dummy, 0), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(atomic_set_release(&dummy, 0), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(atomic_add(1, &dummy), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(atomic_add_return(1, &dummy), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(atomic_add_return_acquire(1, &dummy), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(atomic_add_return_release(1, &dummy), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add(1, &dummy), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_release(1, &dummy), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(test_and_set_bit(0, &test_var), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(test_and_clear_bit(0, &test_var), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(test_and_change_bit(0, &test_var), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock(0, &test_var), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(__clear_bit_unlock(0, &test_var), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(arch_spin_lock(&arch_spinlock), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(arch_spin_unlock(&arch_spinlock), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(spin_lock(&test_spinlock), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(spin_unlock(&test_spinlock), true);
|
||||
KCSAN_EXPECT_READ_BARRIER(mutex_lock(&test_mutex), false);
|
||||
KCSAN_EXPECT_READ_BARRIER(mutex_unlock(&test_mutex), true);
|
||||
|
||||
KCSAN_EXPECT_WRITE_BARRIER(mb(), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(wmb(), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(rmb(), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(smp_mb(), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(smp_wmb(), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(smp_rmb(), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(dma_wmb(), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(dma_rmb(), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(smp_mb__before_atomic(), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_atomic(), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(smp_mb__after_spinlock(), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(smp_store_mb(test_var, 0), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(smp_load_acquire(&test_var), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(smp_store_release(&test_var, 0), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(xchg(&test_var, 0), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(xchg_release(&test_var, 0), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(xchg_relaxed(&test_var, 0), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(cmpxchg(&test_var, 0, 0), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_release(&test_var, 0, 0), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(atomic_read(&dummy), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(atomic_read_acquire(&dummy), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(atomic_set(&dummy, 0), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(atomic_set_release(&dummy, 0), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(atomic_add(1, &dummy), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return(1, &dummy), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_acquire(1, &dummy), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_release(1, &dummy), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add(1, &dummy), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_release(1, &dummy), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(test_and_set_bit(0, &test_var), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(test_and_clear_bit(0, &test_var), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(test_and_change_bit(0, &test_var), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock(0, &test_var), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(__clear_bit_unlock(0, &test_var), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(arch_spin_lock(&arch_spinlock), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(spin_lock(&test_spinlock), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(spin_unlock(&test_spinlock), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(mutex_lock(&test_mutex), false);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(mutex_unlock(&test_mutex), true);
|
||||
|
||||
KCSAN_EXPECT_RW_BARRIER(mb(), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(wmb(), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(rmb(), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(smp_mb(), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(smp_wmb(), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(smp_rmb(), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(dma_wmb(), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(dma_rmb(), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(smp_mb__before_atomic(), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(smp_mb__after_atomic(), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(smp_mb__after_spinlock(), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(smp_store_mb(test_var, 0), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(smp_load_acquire(&test_var), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(smp_store_release(&test_var, 0), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(xchg(&test_var, 0), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(xchg_release(&test_var, 0), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(xchg_relaxed(&test_var, 0), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(cmpxchg(&test_var, 0, 0), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(cmpxchg_release(&test_var, 0, 0), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(cmpxchg_relaxed(&test_var, 0, 0), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(atomic_read(&dummy), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(atomic_read_acquire(&dummy), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(atomic_set(&dummy, 0), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(atomic_set_release(&dummy, 0), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(atomic_add(1, &dummy), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(atomic_add_return(1, &dummy), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(atomic_add_return_acquire(1, &dummy), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(atomic_add_return_release(1, &dummy), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(atomic_add_return_relaxed(1, &dummy), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add(1, &dummy), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_acquire(1, &dummy), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_release(1, &dummy), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(atomic_fetch_add_relaxed(1, &dummy), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(test_and_set_bit(0, &test_var), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(test_and_clear_bit(0, &test_var), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(test_and_change_bit(0, &test_var), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock(0, &test_var), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(__clear_bit_unlock(0, &test_var), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(arch_spin_lock(&arch_spinlock), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(arch_spin_unlock(&arch_spinlock), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(spin_lock(&test_spinlock), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(spin_unlock(&test_spinlock), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(mutex_lock(&test_mutex), false);
|
||||
KCSAN_EXPECT_RW_BARRIER(mutex_unlock(&test_mutex), true);
|
||||
|
||||
#ifdef clear_bit_unlock_is_negative_byte
|
||||
KCSAN_EXPECT_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
|
||||
KCSAN_EXPECT_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
|
||||
KCSAN_EXPECT_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var), true);
|
||||
#endif
|
||||
kcsan_nestable_atomic_end();
|
||||
}
|
||||
|
||||
/* Simple test with normal data race. */
|
||||
__no_kcsan
|
||||
static void test_basic(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect = {
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
|
||||
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
|
||||
},
|
||||
};
|
||||
static const struct expect_report never = {
|
||||
struct expect_report never = {
|
||||
.access = {
|
||||
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
|
||||
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
|
||||
@ -469,14 +738,14 @@ static void test_basic(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_concurrent_races(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect = {
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
/* NULL will match any address. */
|
||||
{ test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
|
||||
{ test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(0) },
|
||||
},
|
||||
};
|
||||
static const struct expect_report never = {
|
||||
struct expect_report never = {
|
||||
.access = {
|
||||
{ test_kernel_rmw_array, NULL, 0, 0 },
|
||||
{ test_kernel_rmw_array, NULL, 0, 0 },
|
||||
@ -498,13 +767,13 @@ static void test_concurrent_races(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_novalue_change(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect_rw = {
|
||||
struct expect_report expect_rw = {
|
||||
.access = {
|
||||
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
|
||||
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
|
||||
},
|
||||
};
|
||||
const struct expect_report expect_ww = {
|
||||
struct expect_report expect_ww = {
|
||||
.access = {
|
||||
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
|
||||
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
|
||||
@ -530,13 +799,13 @@ static void test_novalue_change(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_novalue_change_exception(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect_rw = {
|
||||
struct expect_report expect_rw = {
|
||||
.access = {
|
||||
{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
|
||||
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
|
||||
},
|
||||
};
|
||||
const struct expect_report expect_ww = {
|
||||
struct expect_report expect_ww = {
|
||||
.access = {
|
||||
{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
|
||||
{ test_kernel_write_nochange_rcu, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
|
||||
@ -556,7 +825,7 @@ static void test_novalue_change_exception(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_unknown_origin(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect = {
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
|
||||
{ NULL },
|
||||
@ -578,7 +847,7 @@ static void test_unknown_origin(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_write_write_assume_atomic(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect = {
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
|
||||
{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
|
||||
@ -604,7 +873,7 @@ static void test_write_write_assume_atomic(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_write_write_struct(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect = {
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
|
||||
{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
|
||||
@ -626,7 +895,7 @@ static void test_write_write_struct(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_write_write_struct_part(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect = {
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
|
||||
{ test_kernel_write_struct_part, &test_struct.val[3], sizeof(test_struct.val[3]), KCSAN_ACCESS_WRITE },
|
||||
@ -658,7 +927,7 @@ static void test_read_atomic_write_atomic(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_read_plain_atomic_write(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect = {
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
|
||||
{ test_kernel_write_atomic, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
|
||||
@ -679,7 +948,7 @@ static void test_read_plain_atomic_write(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_read_plain_atomic_rmw(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect = {
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
|
||||
{ test_kernel_atomic_rmw, &test_var, sizeof(test_var),
|
||||
@ -701,13 +970,13 @@ static void test_read_plain_atomic_rmw(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_zero_size_access(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect = {
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
|
||||
{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
|
||||
},
|
||||
};
|
||||
const struct expect_report never = {
|
||||
struct expect_report never = {
|
||||
.access = {
|
||||
{ test_kernel_write_struct, &test_struct, sizeof(test_struct), KCSAN_ACCESS_WRITE },
|
||||
{ test_kernel_read_struct_zero_size, &test_struct.val[3], 0, 0 },
|
||||
@ -741,7 +1010,7 @@ static void test_data_race(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_assert_exclusive_writer(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect = {
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
|
||||
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
|
||||
@ -759,7 +1028,7 @@ static void test_assert_exclusive_writer(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_assert_exclusive_access(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect = {
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
|
||||
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
|
||||
@ -777,19 +1046,19 @@ static void test_assert_exclusive_access(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_assert_exclusive_access_writer(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect_access_writer = {
|
||||
struct expect_report expect_access_writer = {
|
||||
.access = {
|
||||
{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
|
||||
{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
|
||||
},
|
||||
};
|
||||
const struct expect_report expect_access_access = {
|
||||
struct expect_report expect_access_access = {
|
||||
.access = {
|
||||
{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
|
||||
{ test_kernel_assert_access, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE },
|
||||
},
|
||||
};
|
||||
const struct expect_report never = {
|
||||
struct expect_report never = {
|
||||
.access = {
|
||||
{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
|
||||
{ test_kernel_assert_writer, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
|
||||
@ -813,7 +1082,7 @@ static void test_assert_exclusive_access_writer(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_assert_exclusive_bits_change(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect = {
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_assert_bits_change, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT },
|
||||
{ test_kernel_change_bits, &test_var, sizeof(test_var),
|
||||
@ -844,13 +1113,13 @@ static void test_assert_exclusive_bits_nochange(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_assert_exclusive_writer_scoped(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect_start = {
|
||||
struct expect_report expect_start = {
|
||||
.access = {
|
||||
{ test_kernel_assert_writer_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
|
||||
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
|
||||
},
|
||||
};
|
||||
const struct expect_report expect_inscope = {
|
||||
struct expect_report expect_inscope = {
|
||||
.access = {
|
||||
{ test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED },
|
||||
{ test_kernel_write_nochange, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
|
||||
@ -871,16 +1140,16 @@ static void test_assert_exclusive_writer_scoped(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_assert_exclusive_access_scoped(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect_start1 = {
|
||||
struct expect_report expect_start1 = {
|
||||
.access = {
|
||||
{ test_kernel_assert_access_scoped, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
|
||||
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
|
||||
},
|
||||
};
|
||||
const struct expect_report expect_start2 = {
|
||||
struct expect_report expect_start2 = {
|
||||
.access = { expect_start1.access[0], expect_start1.access[0] },
|
||||
};
|
||||
const struct expect_report expect_inscope = {
|
||||
struct expect_report expect_inscope = {
|
||||
.access = {
|
||||
{ test_enter_scope, &test_var, sizeof(test_var), KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_SCOPED },
|
||||
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
|
||||
@ -985,7 +1254,7 @@ static void test_atomic_builtins(struct kunit *test)
|
||||
__no_kcsan
|
||||
static void test_1bit_value_change(struct kunit *test)
|
||||
{
|
||||
const struct expect_report expect = {
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_read, &test_var, sizeof(test_var), 0 },
|
||||
{ test_kernel_xor_1bit, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
|
||||
@ -1005,6 +1274,90 @@ static void test_1bit_value_change(struct kunit *test)
|
||||
KUNIT_EXPECT_TRUE(test, match);
|
||||
}
|
||||
|
||||
__no_kcsan
|
||||
static void test_correct_barrier(struct kunit *test)
|
||||
{
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
|
||||
{ test_kernel_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
|
||||
},
|
||||
};
|
||||
bool match_expect = false;
|
||||
|
||||
test_struct.val[0] = 0; /* init unlocked */
|
||||
begin_test_checks(test_kernel_with_memorder, test_kernel_with_memorder);
|
||||
do {
|
||||
match_expect = report_matches_any_reordered(&expect);
|
||||
} while (!end_test_checks(match_expect));
|
||||
KUNIT_EXPECT_FALSE(test, match_expect);
|
||||
}
|
||||
|
||||
__no_kcsan
|
||||
static void test_missing_barrier(struct kunit *test)
|
||||
{
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
|
||||
{ test_kernel_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
|
||||
},
|
||||
};
|
||||
bool match_expect = false;
|
||||
|
||||
test_struct.val[0] = 0; /* init unlocked */
|
||||
begin_test_checks(test_kernel_wrong_memorder, test_kernel_wrong_memorder);
|
||||
do {
|
||||
match_expect = report_matches_any_reordered(&expect);
|
||||
} while (!end_test_checks(match_expect));
|
||||
if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
|
||||
KUNIT_EXPECT_TRUE(test, match_expect);
|
||||
else
|
||||
KUNIT_EXPECT_FALSE(test, match_expect);
|
||||
}
|
||||
|
||||
__no_kcsan
|
||||
static void test_atomic_builtins_correct_barrier(struct kunit *test)
|
||||
{
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
|
||||
{ test_kernel_atomic_builtin_with_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
|
||||
},
|
||||
};
|
||||
bool match_expect = false;
|
||||
|
||||
test_struct.val[0] = 0; /* init unlocked */
|
||||
begin_test_checks(test_kernel_atomic_builtin_with_memorder,
|
||||
test_kernel_atomic_builtin_with_memorder);
|
||||
do {
|
||||
match_expect = report_matches_any_reordered(&expect);
|
||||
} while (!end_test_checks(match_expect));
|
||||
KUNIT_EXPECT_FALSE(test, match_expect);
|
||||
}
|
||||
|
||||
__no_kcsan
|
||||
static void test_atomic_builtins_missing_barrier(struct kunit *test)
|
||||
{
|
||||
struct expect_report expect = {
|
||||
.access = {
|
||||
{ test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
|
||||
{ test_kernel_atomic_builtin_wrong_memorder, &test_var, sizeof(test_var), __KCSAN_ACCESS_RW(0) },
|
||||
},
|
||||
};
|
||||
bool match_expect = false;
|
||||
|
||||
test_struct.val[0] = 0; /* init unlocked */
|
||||
begin_test_checks(test_kernel_atomic_builtin_wrong_memorder,
|
||||
test_kernel_atomic_builtin_wrong_memorder);
|
||||
do {
|
||||
match_expect = report_matches_any_reordered(&expect);
|
||||
} while (!end_test_checks(match_expect));
|
||||
if (IS_ENABLED(CONFIG_KCSAN_WEAK_MEMORY))
|
||||
KUNIT_EXPECT_TRUE(test, match_expect);
|
||||
else
|
||||
KUNIT_EXPECT_FALSE(test, match_expect);
|
||||
}
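For context, a minimal sketch of the class of bug the test_missing_barrier and test_atomic_builtins_missing_barrier cases above model (illustrative code, not taken from this series): with CONFIG_KCSAN_WEAK_MEMORY=y, KCSAN may simulate the store to data being reordered past the publication of ready and report the resulting race.

#include <linux/atomic.h>
#include <linux/printk.h>

static int data;
static atomic_t ready;

static void buggy_producer(void)
{
	data = 42;
	/* BUG (illustrative): publication needs release ordering, e.g.
	 * atomic_set_release(&ready, 1), so the store to data cannot be
	 * reordered past the publication of ready.
	 */
	atomic_set(&ready, 1);
}

static void consumer(void)
{
	if (atomic_read_acquire(&ready))
		pr_info("data=%d\n", data);	/* may observe data == 0 */
}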
|
||||
|
||||
/*
|
||||
* Generate thread counts for all test cases. Values generated are in interval
|
||||
* [2, 5] followed by exponentially increasing thread counts from 8 to 32.
|
||||
@ -1054,6 +1407,7 @@ static const void *nthreads_gen_params(const void *prev, char *desc)
|
||||
|
||||
#define KCSAN_KUNIT_CASE(test_name) KUNIT_CASE_PARAM(test_name, nthreads_gen_params)
|
||||
static struct kunit_case kcsan_test_cases[] = {
|
||||
KUNIT_CASE(test_barrier_nothreads),
|
||||
KCSAN_KUNIT_CASE(test_basic),
|
||||
KCSAN_KUNIT_CASE(test_concurrent_races),
|
||||
KCSAN_KUNIT_CASE(test_novalue_change),
|
||||
@ -1078,6 +1432,10 @@ static struct kunit_case kcsan_test_cases[] = {
|
||||
KCSAN_KUNIT_CASE(test_seqlock_noreport),
|
||||
KCSAN_KUNIT_CASE(test_atomic_builtins),
|
||||
KCSAN_KUNIT_CASE(test_1bit_value_change),
|
||||
KCSAN_KUNIT_CASE(test_correct_barrier),
|
||||
KCSAN_KUNIT_CASE(test_missing_barrier),
|
||||
KCSAN_KUNIT_CASE(test_atomic_builtins_correct_barrier),
|
||||
KCSAN_KUNIT_CASE(test_atomic_builtins_missing_barrier),
|
||||
{},
|
||||
};
|
||||
|
||||
@ -1142,6 +1500,9 @@ static int test_init(struct kunit *test)
|
||||
observed.nlines = 0;
|
||||
spin_unlock_irqrestore(&observed.lock, flags);
|
||||
|
||||
if (strstr(test->name, "nothreads"))
|
||||
return 0;
|
||||
|
||||
if (!torture_init_begin((char *)test->name, 1))
|
||||
return -EBUSY;
|
||||
|
||||
@ -1184,6 +1545,9 @@ static void test_exit(struct kunit *test)
|
||||
struct task_struct **stop_thread;
|
||||
int i;
|
||||
|
||||
if (strstr(test->name, "nothreads"))
|
||||
return;
|
||||
|
||||
if (torture_cleanup_begin())
|
||||
return;
|
||||
|
||||
|
@ -215,9 +215,9 @@ static const char *get_access_type(int type)
|
||||
if (type & KCSAN_ACCESS_ASSERT) {
|
||||
if (type & KCSAN_ACCESS_SCOPED) {
|
||||
if (type & KCSAN_ACCESS_WRITE)
|
||||
-return "assert no accesses (scoped)";
+return "assert no accesses (reordered)";
|
||||
else
|
||||
-return "assert no writes (scoped)";
+return "assert no writes (reordered)";
|
||||
} else {
|
||||
if (type & KCSAN_ACCESS_WRITE)
|
||||
return "assert no accesses";
|
||||
@ -240,17 +240,17 @@ static const char *get_access_type(int type)
|
||||
case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
|
||||
return "read-write (marked)";
|
||||
case KCSAN_ACCESS_SCOPED:
|
||||
-return "read (scoped)";
+return "read (reordered)";
|
||||
case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ATOMIC:
|
||||
-return "read (marked, scoped)";
+return "read (marked, reordered)";
|
||||
case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE:
|
||||
-return "write (scoped)";
+return "write (reordered)";
|
||||
case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
|
||||
-return "write (marked, scoped)";
+return "write (marked, reordered)";
|
||||
case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
|
||||
-return "read-write (scoped)";
+return "read-write (reordered)";
|
||||
case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
|
||||
-return "read-write (marked, scoped)";
+return "read-write (marked, reordered)";
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
@ -308,10 +308,12 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries
|
||||
|
||||
/*
|
||||
* Skips to the first entry that matches the function of @ip, and then replaces
|
||||
- * that entry with @ip, returning the entries to skip.
+ * that entry with @ip, returning the entries to skip with @replaced containing
+ * the replaced entry.
|
||||
*/
|
||||
static int
|
||||
-replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned long ip)
+replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned long ip,
+		    unsigned long *replaced)
|
||||
{
|
||||
unsigned long symbolsize, offset;
|
||||
unsigned long target_func;
|
||||
@ -330,6 +332,7 @@ replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned lon
|
||||
func -= offset;
|
||||
|
||||
if (func == target_func) {
|
||||
*replaced = stack_entries[skip];
|
||||
stack_entries[skip] = ip;
|
||||
return skip;
|
||||
}
|
||||
@ -342,9 +345,10 @@ replace_stack_entry(unsigned long stack_entries[], int num_entries, unsigned lon
|
||||
}
|
||||
|
||||
static int
|
||||
-sanitize_stack_entries(unsigned long stack_entries[], int num_entries, unsigned long ip)
+sanitize_stack_entries(unsigned long stack_entries[], int num_entries, unsigned long ip,
+		       unsigned long *replaced)
|
||||
{
|
||||
-return ip ? replace_stack_entry(stack_entries, num_entries, ip) :
+return ip ? replace_stack_entry(stack_entries, num_entries, ip, replaced) :
|
||||
get_stack_skipnr(stack_entries, num_entries);
|
||||
}
|
||||
|
||||
@ -360,6 +364,14 @@ static int sym_strcmp(void *addr1, void *addr2)
|
||||
return strncmp(buf1, buf2, sizeof(buf1));
|
||||
}
|
||||
|
||||
static void
|
||||
print_stack_trace(unsigned long stack_entries[], int num_entries, unsigned long reordered_to)
|
||||
{
|
||||
stack_trace_print(stack_entries, num_entries, 0);
|
||||
if (reordered_to)
|
||||
pr_err(" |\n +-> reordered to: %pS\n", (void *)reordered_to);
|
||||
}
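Purely illustrative (symbol names and offsets are made up; only the " |" / " +-> reordered to:" shape follows from the pr_err() format string above): a reported stack trace can now end with an annotation showing where the racing access was reordered to, roughly:

 test_func+0x44/0x60
 kthread+0x118/0x140
 |
 +-> reordered to: test_func+0x58/0x60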
|
||||
|
||||
static void print_verbose_info(struct task_struct *task)
|
||||
{
|
||||
if (!task)
|
||||
@ -378,10 +390,12 @@ static void print_report(enum kcsan_value_change value_change,
|
||||
struct other_info *other_info,
|
||||
u64 old, u64 new, u64 mask)
|
||||
{
|
||||
unsigned long reordered_to = 0;
|
||||
unsigned long stack_entries[NUM_STACK_ENTRIES] = { 0 };
|
||||
int num_stack_entries = stack_trace_save(stack_entries, NUM_STACK_ENTRIES, 1);
|
||||
-int skipnr = sanitize_stack_entries(stack_entries, num_stack_entries, ai->ip);
+int skipnr = sanitize_stack_entries(stack_entries, num_stack_entries, ai->ip, &reordered_to);
|
||||
unsigned long this_frame = stack_entries[skipnr];
|
||||
unsigned long other_reordered_to = 0;
|
||||
unsigned long other_frame = 0;
|
||||
int other_skipnr = 0; /* silence uninit warnings */
|
||||
|
||||
@ -394,7 +408,7 @@ static void print_report(enum kcsan_value_change value_change,
|
||||
if (other_info) {
|
||||
other_skipnr = sanitize_stack_entries(other_info->stack_entries,
|
||||
other_info->num_stack_entries,
|
||||
-other_info->ai.ip);
+other_info->ai.ip, &other_reordered_to);
|
||||
other_frame = other_info->stack_entries[other_skipnr];
|
||||
|
||||
/* @value_change is only known for the other thread */
|
||||
@ -434,10 +448,9 @@ static void print_report(enum kcsan_value_change value_change,
|
||||
other_info->ai.cpu_id);
|
||||
|
||||
/* Print the other thread's stack trace. */
|
||||
-stack_trace_print(other_info->stack_entries + other_skipnr,
+print_stack_trace(other_info->stack_entries + other_skipnr,
|
||||
other_info->num_stack_entries - other_skipnr,
|
||||
-0);
+other_reordered_to);
|
||||
if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
|
||||
print_verbose_info(other_info->task);
|
||||
|
||||
@ -451,9 +464,7 @@ static void print_report(enum kcsan_value_change value_change,
|
||||
get_thread_desc(ai->task_pid), ai->cpu_id);
|
||||
}
|
||||
/* Print stack trace of this thread. */
|
||||
-stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr,
-0);
+print_stack_trace(stack_entries + skipnr, num_stack_entries - skipnr, reordered_to);
|
||||
if (IS_ENABLED(CONFIG_KCSAN_VERBOSE))
|
||||
print_verbose_info(current);
|
||||
|
||||
|
@ -7,10 +7,15 @@
|
||||
|
||||
#define pr_fmt(fmt) "kcsan: " fmt
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/kcsan-checks.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/printk.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include "encoding.h"
|
||||
@ -103,6 +108,143 @@ static bool __init test_matching_access(void)
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Correct memory barrier instrumentation is critical to avoiding false
|
||||
* positives: simple test to check at boot certain barriers are always properly
|
||||
* instrumented. See kcsan_test for a more complete test.
|
||||
*/
|
||||
static DEFINE_SPINLOCK(test_spinlock);
|
||||
static bool __init test_barrier(void)
|
||||
{
|
||||
#ifdef CONFIG_KCSAN_WEAK_MEMORY
|
||||
struct kcsan_scoped_access *reorder_access = ¤t->kcsan_ctx.reorder_access;
|
||||
#else
|
||||
struct kcsan_scoped_access *reorder_access = NULL;
|
||||
#endif
|
||||
bool ret = true;
|
||||
arch_spinlock_t arch_spinlock = __ARCH_SPIN_LOCK_UNLOCKED;
|
||||
atomic_t dummy;
|
||||
long test_var;
|
||||
|
||||
if (!reorder_access || !IS_ENABLED(CONFIG_SMP))
|
||||
return true;
|
||||
|
||||
#define __KCSAN_CHECK_BARRIER(access_type, barrier, name) \
|
||||
do { \
|
||||
reorder_access->type = (access_type) | KCSAN_ACCESS_SCOPED; \
|
||||
reorder_access->size = 1; \
|
||||
barrier; \
|
||||
if (reorder_access->size != 0) { \
|
||||
pr_err("improperly instrumented type=(" #access_type "): " name "\n"); \
|
||||
ret = false; \
|
||||
} \
|
||||
} while (0)
|
||||
#define KCSAN_CHECK_READ_BARRIER(b) __KCSAN_CHECK_BARRIER(0, b, #b)
|
||||
#define KCSAN_CHECK_WRITE_BARRIER(b) __KCSAN_CHECK_BARRIER(KCSAN_ACCESS_WRITE, b, #b)
|
||||
#define KCSAN_CHECK_RW_BARRIER(b) __KCSAN_CHECK_BARRIER(KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND, b, #b)
|
||||
|
||||
kcsan_nestable_atomic_begin(); /* No watchpoints in called functions. */
|
||||
|
||||
KCSAN_CHECK_READ_BARRIER(mb());
|
||||
KCSAN_CHECK_READ_BARRIER(rmb());
|
||||
KCSAN_CHECK_READ_BARRIER(smp_mb());
|
||||
KCSAN_CHECK_READ_BARRIER(smp_rmb());
|
||||
KCSAN_CHECK_READ_BARRIER(dma_rmb());
|
||||
KCSAN_CHECK_READ_BARRIER(smp_mb__before_atomic());
|
||||
KCSAN_CHECK_READ_BARRIER(smp_mb__after_atomic());
|
||||
KCSAN_CHECK_READ_BARRIER(smp_mb__after_spinlock());
|
||||
KCSAN_CHECK_READ_BARRIER(smp_store_mb(test_var, 0));
|
||||
KCSAN_CHECK_READ_BARRIER(smp_store_release(&test_var, 0));
|
||||
KCSAN_CHECK_READ_BARRIER(xchg(&test_var, 0));
|
||||
KCSAN_CHECK_READ_BARRIER(xchg_release(&test_var, 0));
|
||||
KCSAN_CHECK_READ_BARRIER(cmpxchg(&test_var, 0, 0));
|
||||
KCSAN_CHECK_READ_BARRIER(cmpxchg_release(&test_var, 0, 0));
|
||||
KCSAN_CHECK_READ_BARRIER(atomic_set_release(&dummy, 0));
|
||||
KCSAN_CHECK_READ_BARRIER(atomic_add_return(1, &dummy));
|
||||
KCSAN_CHECK_READ_BARRIER(atomic_add_return_release(1, &dummy));
|
||||
KCSAN_CHECK_READ_BARRIER(atomic_fetch_add(1, &dummy));
|
||||
KCSAN_CHECK_READ_BARRIER(atomic_fetch_add_release(1, &dummy));
|
||||
KCSAN_CHECK_READ_BARRIER(test_and_set_bit(0, &test_var));
|
||||
KCSAN_CHECK_READ_BARRIER(test_and_clear_bit(0, &test_var));
|
||||
KCSAN_CHECK_READ_BARRIER(test_and_change_bit(0, &test_var));
|
||||
KCSAN_CHECK_READ_BARRIER(clear_bit_unlock(0, &test_var));
|
||||
KCSAN_CHECK_READ_BARRIER(__clear_bit_unlock(0, &test_var));
|
||||
arch_spin_lock(&arch_spinlock);
|
||||
KCSAN_CHECK_READ_BARRIER(arch_spin_unlock(&arch_spinlock));
|
||||
spin_lock(&test_spinlock);
|
||||
KCSAN_CHECK_READ_BARRIER(spin_unlock(&test_spinlock));
|
||||
|
||||
KCSAN_CHECK_WRITE_BARRIER(mb());
|
||||
KCSAN_CHECK_WRITE_BARRIER(wmb());
|
||||
KCSAN_CHECK_WRITE_BARRIER(smp_mb());
|
||||
KCSAN_CHECK_WRITE_BARRIER(smp_wmb());
|
||||
KCSAN_CHECK_WRITE_BARRIER(dma_wmb());
|
||||
KCSAN_CHECK_WRITE_BARRIER(smp_mb__before_atomic());
|
||||
KCSAN_CHECK_WRITE_BARRIER(smp_mb__after_atomic());
|
||||
KCSAN_CHECK_WRITE_BARRIER(smp_mb__after_spinlock());
|
||||
KCSAN_CHECK_WRITE_BARRIER(smp_store_mb(test_var, 0));
|
||||
KCSAN_CHECK_WRITE_BARRIER(smp_store_release(&test_var, 0));
|
||||
KCSAN_CHECK_WRITE_BARRIER(xchg(&test_var, 0));
|
||||
KCSAN_CHECK_WRITE_BARRIER(xchg_release(&test_var, 0));
|
||||
KCSAN_CHECK_WRITE_BARRIER(cmpxchg(&test_var, 0, 0));
|
||||
KCSAN_CHECK_WRITE_BARRIER(cmpxchg_release(&test_var, 0, 0));
|
||||
KCSAN_CHECK_WRITE_BARRIER(atomic_set_release(&dummy, 0));
|
||||
KCSAN_CHECK_WRITE_BARRIER(atomic_add_return(1, &dummy));
|
||||
KCSAN_CHECK_WRITE_BARRIER(atomic_add_return_release(1, &dummy));
|
||||
KCSAN_CHECK_WRITE_BARRIER(atomic_fetch_add(1, &dummy));
|
||||
KCSAN_CHECK_WRITE_BARRIER(atomic_fetch_add_release(1, &dummy));
|
||||
KCSAN_CHECK_WRITE_BARRIER(test_and_set_bit(0, &test_var));
|
||||
KCSAN_CHECK_WRITE_BARRIER(test_and_clear_bit(0, &test_var));
|
||||
KCSAN_CHECK_WRITE_BARRIER(test_and_change_bit(0, &test_var));
|
||||
KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock(0, &test_var));
|
||||
KCSAN_CHECK_WRITE_BARRIER(__clear_bit_unlock(0, &test_var));
|
||||
arch_spin_lock(&arch_spinlock);
|
||||
KCSAN_CHECK_WRITE_BARRIER(arch_spin_unlock(&arch_spinlock));
|
||||
spin_lock(&test_spinlock);
|
||||
KCSAN_CHECK_WRITE_BARRIER(spin_unlock(&test_spinlock));
|
||||
|
||||
KCSAN_CHECK_RW_BARRIER(mb());
|
||||
KCSAN_CHECK_RW_BARRIER(wmb());
|
||||
KCSAN_CHECK_RW_BARRIER(rmb());
|
||||
KCSAN_CHECK_RW_BARRIER(smp_mb());
|
||||
KCSAN_CHECK_RW_BARRIER(smp_wmb());
|
||||
KCSAN_CHECK_RW_BARRIER(smp_rmb());
|
||||
KCSAN_CHECK_RW_BARRIER(dma_wmb());
|
||||
KCSAN_CHECK_RW_BARRIER(dma_rmb());
|
||||
KCSAN_CHECK_RW_BARRIER(smp_mb__before_atomic());
|
||||
KCSAN_CHECK_RW_BARRIER(smp_mb__after_atomic());
|
||||
KCSAN_CHECK_RW_BARRIER(smp_mb__after_spinlock());
|
||||
KCSAN_CHECK_RW_BARRIER(smp_store_mb(test_var, 0));
|
||||
KCSAN_CHECK_RW_BARRIER(smp_store_release(&test_var, 0));
|
||||
KCSAN_CHECK_RW_BARRIER(xchg(&test_var, 0));
|
||||
KCSAN_CHECK_RW_BARRIER(xchg_release(&test_var, 0));
|
||||
KCSAN_CHECK_RW_BARRIER(cmpxchg(&test_var, 0, 0));
|
||||
KCSAN_CHECK_RW_BARRIER(cmpxchg_release(&test_var, 0, 0));
|
||||
KCSAN_CHECK_RW_BARRIER(atomic_set_release(&dummy, 0));
|
||||
KCSAN_CHECK_RW_BARRIER(atomic_add_return(1, &dummy));
|
||||
KCSAN_CHECK_RW_BARRIER(atomic_add_return_release(1, &dummy));
|
||||
KCSAN_CHECK_RW_BARRIER(atomic_fetch_add(1, &dummy));
|
||||
KCSAN_CHECK_RW_BARRIER(atomic_fetch_add_release(1, &dummy));
|
||||
KCSAN_CHECK_RW_BARRIER(test_and_set_bit(0, &test_var));
|
||||
KCSAN_CHECK_RW_BARRIER(test_and_clear_bit(0, &test_var));
|
||||
KCSAN_CHECK_RW_BARRIER(test_and_change_bit(0, &test_var));
|
||||
KCSAN_CHECK_RW_BARRIER(clear_bit_unlock(0, &test_var));
|
||||
KCSAN_CHECK_RW_BARRIER(__clear_bit_unlock(0, &test_var));
|
||||
arch_spin_lock(&arch_spinlock);
|
||||
KCSAN_CHECK_RW_BARRIER(arch_spin_unlock(&arch_spinlock));
|
||||
spin_lock(&test_spinlock);
|
||||
KCSAN_CHECK_RW_BARRIER(spin_unlock(&test_spinlock));
|
||||
|
||||
#ifdef clear_bit_unlock_is_negative_byte
|
||||
KCSAN_CHECK_RW_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var));
|
||||
KCSAN_CHECK_READ_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var));
|
||||
KCSAN_CHECK_WRITE_BARRIER(clear_bit_unlock_is_negative_byte(0, &test_var));
|
||||
#endif
|
||||
kcsan_nestable_atomic_end();
|
||||
|
||||
return ret;
|
||||
}
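A hedged sketch of how this instrumentation could be used outside the selftest (my_publish_unlock() and __my_arch_unlock() are hypothetical; kcsan_release() is one of the barrier hooks this series adds, as seen in the generated atomics and the objtool whitelist elsewhere in this diff): a primitive whose hardware ordering KCSAN cannot see can tell the weak memory model about its implied release.

#include <linux/kcsan-checks.h>

static inline void my_publish_unlock(int *lock)
{
	kcsan_release();	/* model the release ordering ... */
	__my_arch_unlock(lock);	/* ... implied by this hypothetical primitive */
}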
|
||||
|
||||
static int __init kcsan_selftest(void)
|
||||
{
|
||||
int passed = 0;
|
||||
@ -120,6 +262,7 @@ static int __init kcsan_selftest(void)
|
||||
RUN_TEST(test_requires);
|
||||
RUN_TEST(test_encode_decode);
|
||||
RUN_TEST(test_matching_access);
|
||||
RUN_TEST(test_barrier);
|
||||
|
||||
pr_info("selftest: %d/%d tests passed\n", passed, total);
|
||||
if (passed != total)
|
||||
|
@ -11,11 +11,10 @@ ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
|
||||
# that is not a function of syscall inputs. E.g. involuntary context switches.
|
||||
KCOV_INSTRUMENT := n
|
||||
|
||||
-# There are numerous data races here, however, most of them are due to plain accesses.
-# This would make it even harder for syzbot to find reproducers, because these
-# bugs trigger without specific input. Disable by default, but should re-enable
-# eventually.
+# Disable KCSAN to avoid excessive noise and performance degradation. To avoid
+# false positives ensure barriers implied by sched functions are instrumented.
|
||||
KCSAN_SANITIZE := n
|
||||
KCSAN_INSTRUMENT_BARRIERS := y
|
||||
|
||||
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
|
||||
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
|
||||
|
@ -191,6 +191,26 @@ config KCSAN_STRICT
|
||||
closely aligns with the rules defined by the Linux-kernel memory
|
||||
consistency model (LKMM).
|
||||
|
||||
config KCSAN_WEAK_MEMORY
|
||||
bool "Enable weak memory modeling to detect missing memory barriers"
|
||||
default y
|
||||
depends on KCSAN_STRICT
|
||||
# We can either let objtool nop __tsan_func_{entry,exit}() and builtin
|
||||
# atomics instrumentation in .noinstr.text, or use a compiler that can
|
||||
# implement __no_kcsan to really remove all instrumentation.
|
||||
depends on STACK_VALIDATION || CC_IS_GCC || CLANG_VERSION >= 140000
|
||||
help
|
||||
Enable support for modeling a subset of weak memory, which allows
|
||||
detecting a subset of data races due to missing memory barriers.
|
||||
|
||||
Depends on KCSAN_STRICT, because the options strengthening certain
|
||||
plain accesses by default (depending on !KCSAN_STRICT) reduce the
|
||||
ability to detect any data races involving reordered accesses, in
|
||||
particular reordered writes.
|
||||
|
||||
Weak memory modeling relies on additional instrumentation and may
|
||||
affect performance.
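A minimal sketch of a config fragment that turns the new modeling on (assuming a compiler/objtool combination that satisfies the dependency above):

CONFIG_KCSAN=y
CONFIG_KCSAN_STRICT=y
CONFIG_KCSAN_WEAK_MEMORY=y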
|
||||
|
||||
config KCSAN_REPORT_VALUE_CHANGE_ONLY
|
||||
bool "Only report races where watcher observed a data value change"
|
||||
default y
|
||||
|
@ -15,6 +15,8 @@ KCSAN_SANITIZE_slab_common.o := n
|
||||
KCSAN_SANITIZE_slab.o := n
|
||||
KCSAN_SANITIZE_slub.o := n
|
||||
KCSAN_SANITIZE_page_alloc.o := n
|
||||
# But enable explicit instrumentation for memory barriers.
|
||||
KCSAN_INSTRUMENT_BARRIERS := y
|
||||
|
||||
# These files are disabled because they produce non-interesting and/or
|
||||
# flaky coverage that is not a function of syscall inputs. E.g. slab is out of
|
||||
|
@ -9,7 +9,18 @@ endif
|
||||
|
||||
# Keep most options here optional, to allow enabling more compilers if absence
|
||||
# of some options does not break KCSAN nor causes false positive reports.
|
||||
-export CFLAGS_KCSAN := -fsanitize=thread \
-$(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0) -fno-optimize-sibling-calls) \
+kcsan-cflags := -fsanitize=thread -fno-optimize-sibling-calls \
|
||||
$(call cc-option,$(call cc-param,tsan-compound-read-before-write=1),$(call cc-option,$(call cc-param,tsan-instrument-read-before-write=1))) \
|
||||
$(call cc-param,tsan-distinguish-volatile=1)
|
||||
|
||||
ifdef CONFIG_CC_IS_GCC
|
||||
# GCC started warning about operations unsupported by the TSan runtime. But
|
||||
# KCSAN != TSan, so just ignore these warnings.
|
||||
kcsan-cflags += -Wno-tsan
|
||||
endif
|
||||
|
||||
ifndef CONFIG_KCSAN_WEAK_MEMORY
|
||||
kcsan-cflags += $(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0))
|
||||
endif
|
||||
|
||||
export CFLAGS_KCSAN := $(kcsan-cflags)
|
||||
|
@ -182,6 +182,11 @@ ifeq ($(CONFIG_KCSAN),y)
|
||||
_c_flags += $(if $(patsubst n%,, \
|
||||
$(KCSAN_SANITIZE_$(basetarget).o)$(KCSAN_SANITIZE)y), \
|
||||
$(CFLAGS_KCSAN))
|
||||
# Some uninstrumented files provide implied barriers required to avoid false
|
||||
# positives: set KCSAN_INSTRUMENT_BARRIERS for barrier instrumentation only.
|
||||
_c_flags += $(if $(patsubst n%,, \
|
||||
$(KCSAN_INSTRUMENT_BARRIERS_$(basetarget).o)$(KCSAN_INSTRUMENT_BARRIERS)n), \
|
||||
-D__KCSAN_INSTRUMENT_BARRIERS__)
|
||||
endif
|
||||
|
||||
# $(srctree)/$(src) for including checkin headers from generated source files
|
||||
|
@ -34,6 +34,14 @@ gen_param_check()
|
||||
gen_params_checks()
|
||||
{
|
||||
local meta="$1"; shift
|
||||
local order="$1"; shift
|
||||
|
||||
if [ "${order}" = "_release" ]; then
|
||||
printf "\tkcsan_release();\n"
|
||||
elif [ -z "${order}" ] && ! meta_in "$meta" "slv"; then
|
||||
# RMW with return value is fully ordered
|
||||
printf "\tkcsan_mb();\n"
|
||||
fi
|
||||
|
||||
while [ "$#" -gt 0 ]; do
|
||||
gen_param_check "$meta" "$1"
|
||||
@ -56,7 +64,7 @@ gen_proto_order_variant()
|
||||
|
||||
local ret="$(gen_ret_type "${meta}" "${int}")"
|
||||
local params="$(gen_params "${int}" "${atomic}" "$@")"
|
||||
-local checks="$(gen_params_checks "${meta}" "$@")"
+local checks="$(gen_params_checks "${meta}" "${order}" "$@")"
|
||||
local args="$(gen_args "$@")"
|
||||
local retstmt="$(gen_ret_stmt "${meta}")"
|
||||
|
||||
@ -75,29 +83,44 @@ EOF
|
||||
gen_xchg()
|
||||
{
|
||||
local xchg="$1"; shift
|
||||
local order="$1"; shift
|
||||
local mult="$1"; shift
|
||||
|
||||
kcsan_barrier=""
|
||||
if [ "${xchg%_local}" = "${xchg}" ]; then
|
||||
case "$order" in
|
||||
_release) kcsan_barrier="kcsan_release()" ;;
|
||||
"") kcsan_barrier="kcsan_mb()" ;;
|
||||
esac
|
||||
fi
|
||||
|
||||
if [ "${xchg%${xchg#try_cmpxchg}}" = "try_cmpxchg" ] ; then
|
||||
|
||||
cat <<EOF
|
||||
-#define ${xchg}(ptr, oldp, ...) \\
+#define ${xchg}${order}(ptr, oldp, ...) \\
|
||||
({ \\
|
||||
typeof(ptr) __ai_ptr = (ptr); \\
|
||||
typeof(oldp) __ai_oldp = (oldp); \\
|
||||
EOF
|
||||
[ -n "$kcsan_barrier" ] && printf "\t${kcsan_barrier}; \\\\\n"
|
||||
cat <<EOF
|
||||
instrument_atomic_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
|
||||
instrument_atomic_write(__ai_oldp, ${mult}sizeof(*__ai_oldp)); \\
|
||||
-arch_${xchg}(__ai_ptr, __ai_oldp, __VA_ARGS__); \\
+arch_${xchg}${order}(__ai_ptr, __ai_oldp, __VA_ARGS__); \\
|
||||
})
|
||||
EOF
|
||||
|
||||
else
|
||||
|
||||
cat <<EOF
|
||||
-#define ${xchg}(ptr, ...) \\
+#define ${xchg}${order}(ptr, ...) \\
|
||||
({ \\
|
||||
typeof(ptr) __ai_ptr = (ptr); \\
|
||||
EOF
|
||||
[ -n "$kcsan_barrier" ] && printf "\t${kcsan_barrier}; \\\\\n"
|
||||
cat <<EOF
|
||||
instrument_atomic_write(__ai_ptr, ${mult}sizeof(*__ai_ptr)); \\
|
||||
-arch_${xchg}(__ai_ptr, __VA_ARGS__); \\
+arch_${xchg}${order}(__ai_ptr, __VA_ARGS__); \\
|
||||
})
|
||||
EOF
|
||||
|
||||
@ -145,21 +168,21 @@ done
|
||||
|
||||
for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg"; do
|
||||
for order in "" "_acquire" "_release" "_relaxed"; do
|
||||
-gen_xchg "${xchg}${order}" ""
+gen_xchg "${xchg}" "${order}" ""
|
||||
printf "\n"
|
||||
done
|
||||
done
|
||||
|
||||
for xchg in "cmpxchg_local" "cmpxchg64_local" "sync_cmpxchg"; do
|
||||
-gen_xchg "${xchg}" ""
+gen_xchg "${xchg}" "" ""
|
||||
printf "\n"
|
||||
done
|
||||
|
||||
-gen_xchg "cmpxchg_double" "2 * "
+gen_xchg "cmpxchg_double" "" "2 * "
|
||||
|
||||
printf "\n\n"
|
||||
|
||||
-gen_xchg "cmpxchg_double_local" "2 * "
+gen_xchg "cmpxchg_double_local" "" "2 * "
|
||||
|
||||
cat <<EOF
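For reference, roughly what gen_xchg now emits for the fully-ordered, non-_local case (reconstructed from the script fragments above, not copied from a generated header); kcsan_mb() lands before the existing instrumentation so barrier modeling sees the implied full barrier:

#define xchg(ptr, ...) \
({ \
	typeof(ptr) __ai_ptr = (ptr); \
	kcsan_mb(); \
	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
	arch_xchg(__ai_ptr, __VA_ARGS__); \
})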
|
||||
|
||||
|
@ -849,6 +849,10 @@ static const char *uaccess_safe_builtin[] = {
|
||||
"__asan_report_store16_noabort",
|
||||
/* KCSAN */
|
||||
"__kcsan_check_access",
|
||||
"__kcsan_mb",
|
||||
"__kcsan_wmb",
|
||||
"__kcsan_rmb",
|
||||
"__kcsan_release",
|
||||
"kcsan_found_watchpoint",
|
||||
"kcsan_setup_watchpoint",
|
||||
"kcsan_check_scoped_accesses",
|
||||
@ -1068,11 +1072,11 @@ static void annotate_call_site(struct objtool_file *file,
|
||||
}
|
||||
|
||||
/*
|
||||
- * Many compilers cannot disable KCOV with a function attribute
- * so they need a little help, NOP out any KCOV calls from noinstr
- * text.
+ * Many compilers cannot disable KCOV or sanitizer calls with a function
+ * attribute so they need a little help, NOP out any such calls from
+ * noinstr text.
|
||||
*/
|
||||
-if (insn->sec->noinstr && sym->kcov) {
+if (insn->sec->noinstr && sym->profiling_func) {
|
||||
if (reloc) {
|
||||
reloc->type = R_NONE;
|
||||
elf_write_reloc(file->elf, reloc);
|
||||
@ -1987,6 +1991,31 @@ static int read_intra_function_calls(struct objtool_file *file)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return true if name matches an instrumentation function, where calls to that
|
||||
* function from noinstr code can safely be removed, but compilers won't do so.
|
||||
*/
|
||||
static bool is_profiling_func(const char *name)
|
||||
{
|
||||
/*
|
||||
* Many compilers cannot disable KCOV with a function attribute.
|
||||
*/
|
||||
if (!strncmp(name, "__sanitizer_cov_", 16))
|
||||
return true;
|
||||
|
||||
/*
|
||||
* Some compilers currently do not remove __tsan_func_entry/exit nor
|
||||
* __tsan_atomic_signal_fence (used for barrier instrumentation) with
|
||||
* the __no_sanitize_thread attribute, remove them. Once the kernel's
|
||||
* minimum Clang version is 14.0, this can be removed.
|
||||
*/
|
||||
if (!strncmp(name, "__tsan_func_", 12) ||
|
||||
!strcmp(name, "__tsan_atomic_signal_fence"))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int classify_symbols(struct objtool_file *file)
|
||||
{
|
||||
struct section *sec;
|
||||
@ -2007,8 +2036,8 @@ static int classify_symbols(struct objtool_file *file)
|
||||
if (!strcmp(func->name, "__fentry__"))
|
||||
func->fentry = true;
|
||||
|
||||
-if (!strncmp(func->name, "__sanitizer_cov_", 16))
-	func->kcov = true;
+if (is_profiling_func(func->name))
+	func->profiling_func = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -58,7 +58,7 @@ struct symbol {
|
||||
u8 static_call_tramp : 1;
|
||||
u8 retpoline_thunk : 1;
|
||||
u8 fentry : 1;
|
||||
-u8 kcov : 1;
+u8 profiling_func : 1;
|
||||
struct list_head pv_target;
|
||||
};
|
||||
|
||||
|