locking/atomic: treewide: use raw_atomic*_<op>()

Now that we have raw_atomic*_<op>() definitions, there's no need to use
arch_atomic*_<op>() definitions outside of the low-level atomic
definitions.

Move treewide users of arch_atomic*_<op>() over to the equivalent
raw_atomic*_<op>().

There should be no functional change as a result of this patch.
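As an illustration of the pattern (a minimal sketch using a made-up example_lock
variable, not code taken from this patch), a caller that must stay uninstrumented
simply swaps the arch_ prefix for raw_; the arguments and ordering semantics are
unchanged:

  #include <linux/atomic.h>

  /* hypothetical lock variable, for illustration only */
  static atomic_t example_lock = ATOMIC_INIT(0);

  static void example_lock_acquire(void)
  {
  	/* was: while (arch_atomic_cmpxchg(&example_lock, 0, 1) == 1) */
  	while (raw_atomic_cmpxchg(&example_lock, 0, 1) == 1)
  		;	/* spin until the holder stores 0 */
  }

  static void example_lock_release(void)
  {
  	/* was: arch_atomic_set(&example_lock, 0) */
  	raw_atomic_set(&example_lock, 0);
  }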

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-19-mark.rutland@arm.com
commit 0f613bfa82 (parent c9268ac615)
Author:    Mark Rutland <mark.rutland@arm.com>
Date:      2023-06-05 08:01:15 +01:00
Committer: Peter Zijlstra

 14 files changed, 42 insertions(+), 42 deletions(-)


@@ -417,9 +417,9 @@ noinstr static void nmi_ipi_lock_start(unsigned long *flags)
 {
 	raw_local_irq_save(*flags);
 	hard_irq_disable();
-	while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
+	while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
 		raw_local_irq_restore(*flags);
-		spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
+		spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
 		raw_local_irq_save(*flags);
 		hard_irq_disable();
 	}
@@ -427,15 +427,15 @@ noinstr static void nmi_ipi_lock_start(unsigned long *flags)
 
 noinstr static void nmi_ipi_lock(void)
 {
-	while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
-		spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
+	while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
+		spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
 }
 
 noinstr static void nmi_ipi_unlock(void)
 {
 	smp_mb();
-	WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1);
-	arch_atomic_set(&__nmi_ipi_lock, 0);
+	WARN_ON(raw_atomic_read(&__nmi_ipi_lock) != 1);
+	raw_atomic_set(&__nmi_ipi_lock, 0);
 }
 
 noinstr static void nmi_ipi_unlock_end(unsigned long *flags)


@@ -1799,7 +1799,7 @@ struct bp_patching_desc *try_get_desc(void)
 {
 	struct bp_patching_desc *desc = &bp_desc;
 
-	if (!arch_atomic_inc_not_zero(&desc->refs))
+	if (!raw_atomic_inc_not_zero(&desc->refs))
 		return NULL;
 
 	return desc;
@@ -1810,7 +1810,7 @@ static __always_inline void put_desc(void)
 	struct bp_patching_desc *desc = &bp_desc;
 
 	smp_mb__before_atomic();
-	arch_atomic_dec(&desc->refs);
+	raw_atomic_dec(&desc->refs);
 }
 
 static __always_inline void *text_poke_addr(struct text_poke_loc *tp)


@@ -1022,12 +1022,12 @@ static noinstr int mce_start(int *no_way_out)
 	if (!timeout)
 		return ret;
 
-	arch_atomic_add(*no_way_out, &global_nwo);
+	raw_atomic_add(*no_way_out, &global_nwo);
 	/*
 	 * Rely on the implied barrier below, such that global_nwo
 	 * is updated before mce_callin.
 	 */
-	order = arch_atomic_inc_return(&mce_callin);
+	order = raw_atomic_inc_return(&mce_callin);
 	arch_cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);
 
 	/* Enable instrumentation around calls to external facilities */
@@ -1036,10 +1036,10 @@ static noinstr int mce_start(int *no_way_out)
 	/*
 	 * Wait for everyone.
 	 */
-	while (arch_atomic_read(&mce_callin) != num_online_cpus()) {
+	while (raw_atomic_read(&mce_callin) != num_online_cpus()) {
 		if (mce_timed_out(&timeout,
 				  "Timeout: Not all CPUs entered broadcast exception handler")) {
-			arch_atomic_set(&global_nwo, 0);
+			raw_atomic_set(&global_nwo, 0);
 			goto out;
 		}
 		ndelay(SPINUNIT);
@@ -1054,7 +1054,7 @@ static noinstr int mce_start(int *no_way_out)
 		/*
 		 * Monarch: Starts executing now, the others wait.
 		 */
-		arch_atomic_set(&mce_executing, 1);
+		raw_atomic_set(&mce_executing, 1);
 	} else {
 		/*
 		 * Subject: Now start the scanning loop one by one in
@@ -1062,10 +1062,10 @@ static noinstr int mce_start(int *no_way_out)
 		 * This way when there are any shared banks it will be
 		 * only seen by one CPU before cleared, avoiding duplicates.
 		 */
-		while (arch_atomic_read(&mce_executing) < order) {
+		while (raw_atomic_read(&mce_executing) < order) {
 			if (mce_timed_out(&timeout,
 					  "Timeout: Subject CPUs unable to finish machine check processing")) {
-				arch_atomic_set(&global_nwo, 0);
+				raw_atomic_set(&global_nwo, 0);
 				goto out;
 			}
 			ndelay(SPINUNIT);
@@ -1075,7 +1075,7 @@ static noinstr int mce_start(int *no_way_out)
 	/*
 	 * Cache the global no_way_out state.
 	 */
-	*no_way_out = arch_atomic_read(&global_nwo);
+	*no_way_out = raw_atomic_read(&global_nwo);
 
 	ret = order;


@@ -496,7 +496,7 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
 	 */
 	sev_es_nmi_complete();
 	if (IS_ENABLED(CONFIG_NMI_CHECK_CPU))
-		arch_atomic_long_inc(&nsp->idt_calls);
+		raw_atomic_long_inc(&nsp->idt_calls);
 
 	if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
 		return;


@@ -101,11 +101,11 @@ u64 __pvclock_clocksource_read(struct pvclock_vcpu_time_info *src, bool dowd)
 	 * updating at the same time, and one of them could be slightly behind,
 	 * making the assumption that last_value always go forward fail to hold.
 	 */
-	last = arch_atomic64_read(&last_value);
+	last = raw_atomic64_read(&last_value);
 	do {
 		if (ret <= last)
 			return last;
-	} while (!arch_atomic64_try_cmpxchg(&last_value, &last, ret));
+	} while (!raw_atomic64_try_cmpxchg(&last_value, &last, ret));
 
 	return ret;
 }


@@ -13155,7 +13155,7 @@ EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
 
 bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
 {
-	return arch_atomic_read(&kvm->arch.assigned_device_count);
+	return raw_atomic_read(&kvm->arch.assigned_device_count);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);


@@ -15,21 +15,21 @@ static __always_inline void
 arch_set_bit(unsigned int nr, volatile unsigned long *p)
 {
 	p += BIT_WORD(nr);
-	arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+	raw_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
 static __always_inline void
 arch_clear_bit(unsigned int nr, volatile unsigned long *p)
 {
 	p += BIT_WORD(nr);
-	arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+	raw_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
 static __always_inline void
 arch_change_bit(unsigned int nr, volatile unsigned long *p)
 {
 	p += BIT_WORD(nr);
-	arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+	raw_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
 static __always_inline int
@@ -39,7 +39,7 @@ arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
 	unsigned long mask = BIT_MASK(nr);
 
 	p += BIT_WORD(nr);
-	old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
+	old = raw_atomic_long_fetch_or(mask, (atomic_long_t *)p);
 	return !!(old & mask);
 }
@@ -50,7 +50,7 @@ arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
 	unsigned long mask = BIT_MASK(nr);
 
 	p += BIT_WORD(nr);
-	old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+	old = raw_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
 	return !!(old & mask);
 }
@@ -61,7 +61,7 @@ arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
 	unsigned long mask = BIT_MASK(nr);
 
 	p += BIT_WORD(nr);
-	old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+	old = raw_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
 	return !!(old & mask);
 }


@@ -25,7 +25,7 @@ arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
 	if (READ_ONCE(*p) & mask)
 		return 1;
 
-	old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+	old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
 	return !!(old & mask);
 }
@@ -41,7 +41,7 @@ static __always_inline void
 arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
 {
 	p += BIT_WORD(nr);
-	arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+	raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
 /**
@@ -63,7 +63,7 @@ arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
 	p += BIT_WORD(nr);
 	old = READ_ONCE(*p);
 	old &= ~BIT_MASK(nr);
-	arch_atomic_long_set_release((atomic_long_t *)p, old);
+	raw_atomic_long_set_release((atomic_long_t *)p, old);
 }
 
 /**
@@ -83,7 +83,7 @@ static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
 	unsigned long mask = BIT_MASK(nr);
 
 	p += BIT_WORD(nr);
-	old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+	old = raw_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
 	return !!(old & BIT(7));
 }
 #define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte


@@ -119,7 +119,7 @@ extern void ct_idle_exit(void);
  */
 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-	return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
+	return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
 }
 
 /*
@@ -128,7 +128,7 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
  */
 static __always_inline unsigned long ct_state_inc(int incby)
 {
-	return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
+	return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
 }
 
 static __always_inline bool warn_rcu_enter(void)


@@ -51,7 +51,7 @@ DECLARE_PER_CPU(struct context_tracking, context_tracking);
 
 #ifdef CONFIG_CONTEXT_TRACKING_USER
 static __always_inline int __ct_state(void)
 {
-	return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
+	return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
 }
 #endif


@@ -1071,7 +1071,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
  */
 static __always_inline unsigned int num_online_cpus(void)
 {
-	return arch_atomic_read(&__num_online_cpus);
+	return raw_atomic_read(&__num_online_cpus);
 }
 #define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
 #define num_present_cpus()	cpumask_weight(cpu_present_mask)


@@ -257,7 +257,7 @@ extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);
 
 static __always_inline int static_key_count(struct static_key *key)
 {
-	return arch_atomic_read(&key->enabled);
+	return raw_atomic_read(&key->enabled);
 }
 
 static __always_inline void jump_label_init(void)


@@ -510,7 +510,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
 			 * In this we case we don't care about any concurrency/ordering.
 			 */
 			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
-				arch_atomic_set(&ct->state, state);
+				raw_atomic_set(&ct->state, state);
 		} else {
 			/*
 			 * Even if context tracking is disabled on this CPU, because it's outside
@@ -527,7 +527,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
 			 */
 			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
 				/* Tracking for vtime only, no concurrent RCU EQS accounting */
-				arch_atomic_set(&ct->state, state);
+				raw_atomic_set(&ct->state, state);
 			} else {
 				/*
 				 * Tracking for vtime and RCU EQS. Make sure we don't race
@@ -535,7 +535,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
 				 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
 				 * ordered.
 				 */
-				arch_atomic_add(state, &ct->state);
+				raw_atomic_add(state, &ct->state);
 			}
 		}
 	}
@@ -630,12 +630,12 @@ void noinstr __ct_user_exit(enum ctx_state state)
 			 * In this we case we don't care about any concurrency/ordering.
 			 */
 			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
-				arch_atomic_set(&ct->state, CONTEXT_KERNEL);
+				raw_atomic_set(&ct->state, CONTEXT_KERNEL);
 		} else {
 			if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
 				/* Tracking for vtime only, no concurrent RCU EQS accounting */
-				arch_atomic_set(&ct->state, CONTEXT_KERNEL);
+				raw_atomic_set(&ct->state, CONTEXT_KERNEL);
 			} else {
 				/*
 				 * Tracking for vtime and RCU EQS. Make sure we don't race
@@ -643,7 +643,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
 				 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
 				 * ordered.
 				 */
-				arch_atomic_sub(state, &ct->state);
+				raw_atomic_sub(state, &ct->state);
 			}
 		}
 	}


@@ -287,7 +287,7 @@ static __always_inline u64 sched_clock_local(struct sched_clock_data *scd)
 	clock = wrap_max(clock, min_clock);
 	clock = wrap_min(clock, max_clock);
 
-	if (!arch_try_cmpxchg64(&scd->clock, &old_clock, clock))
+	if (!raw_try_cmpxchg64(&scd->clock, &old_clock, clock))
 		goto again;
 
 	return clock;