Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking/atomic: treewide: use raw_atomic*_<op>()

Now that we have raw_atomic*_<op>() definitions, there's no need to use
arch_atomic*_<op>() definitions outside of the low-level atomic
definitions.

Move treewide users of arch_atomic*_<op>() over to the equivalent
raw_atomic*_<op>().

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-19-mark.rutland@arm.com

Authored by Mark Rutland and committed by Peter Zijlstra.
0f613bfa c9268ac6

+42 -42
+6 -6
arch/powerpc/kernel/smp.c
··· 417 417 { 418 418 raw_local_irq_save(*flags); 419 419 hard_irq_disable(); 420 - while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) { 420 + while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) { 421 421 raw_local_irq_restore(*flags); 422 - spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0); 422 + spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0); 423 423 raw_local_irq_save(*flags); 424 424 hard_irq_disable(); 425 425 } ··· 427 427 428 428 noinstr static void nmi_ipi_lock(void) 429 429 { 430 - while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) 431 - spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0); 430 + while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) 431 + spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0); 432 432 } 433 433 434 434 noinstr static void nmi_ipi_unlock(void) 435 435 { 436 436 smp_mb(); 437 - WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1); 438 - arch_atomic_set(&__nmi_ipi_lock, 0); 437 + WARN_ON(raw_atomic_read(&__nmi_ipi_lock) != 1); 438 + raw_atomic_set(&__nmi_ipi_lock, 0); 439 439 } 440 440 441 441 noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
+2 -2
arch/x86/kernel/alternative.c
··· 1799 1799 { 1800 1800 struct bp_patching_desc *desc = &bp_desc; 1801 1801 1802 - if (!arch_atomic_inc_not_zero(&desc->refs)) 1802 + if (!raw_atomic_inc_not_zero(&desc->refs)) 1803 1803 return NULL; 1804 1804 1805 1805 return desc; ··· 1810 1810 struct bp_patching_desc *desc = &bp_desc; 1811 1811 1812 1812 smp_mb__before_atomic(); 1813 - arch_atomic_dec(&desc->refs); 1813 + raw_atomic_dec(&desc->refs); 1814 1814 } 1815 1815 1816 1816 static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
+8 -8
arch/x86/kernel/cpu/mce/core.c
··· 1022 1022 if (!timeout) 1023 1023 return ret; 1024 1024 1025 - arch_atomic_add(*no_way_out, &global_nwo); 1025 + raw_atomic_add(*no_way_out, &global_nwo); 1026 1026 /* 1027 1027 * Rely on the implied barrier below, such that global_nwo 1028 1028 * is updated before mce_callin. 1029 1029 */ 1030 - order = arch_atomic_inc_return(&mce_callin); 1030 + order = raw_atomic_inc_return(&mce_callin); 1031 1031 arch_cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus); 1032 1032 1033 1033 /* Enable instrumentation around calls to external facilities */ ··· 1036 1036 /* 1037 1037 * Wait for everyone. 1038 1038 */ 1039 - while (arch_atomic_read(&mce_callin) != num_online_cpus()) { 1039 + while (raw_atomic_read(&mce_callin) != num_online_cpus()) { 1040 1040 if (mce_timed_out(&timeout, 1041 1041 "Timeout: Not all CPUs entered broadcast exception handler")) { 1042 - arch_atomic_set(&global_nwo, 0); 1042 + raw_atomic_set(&global_nwo, 0); 1043 1043 goto out; 1044 1044 } 1045 1045 ndelay(SPINUNIT); ··· 1054 1054 /* 1055 1055 * Monarch: Starts executing now, the others wait. 1056 1056 */ 1057 - arch_atomic_set(&mce_executing, 1); 1057 + raw_atomic_set(&mce_executing, 1); 1058 1058 } else { 1059 1059 /* 1060 1060 * Subject: Now start the scanning loop one by one in ··· 1062 1062 * This way when there are any shared banks it will be 1063 1063 * only seen by one CPU before cleared, avoiding duplicates. 1064 1064 */ 1065 - while (arch_atomic_read(&mce_executing) < order) { 1065 + while (raw_atomic_read(&mce_executing) < order) { 1066 1066 if (mce_timed_out(&timeout, 1067 1067 "Timeout: Subject CPUs unable to finish machine check processing")) { 1068 - arch_atomic_set(&global_nwo, 0); 1068 + raw_atomic_set(&global_nwo, 0); 1069 1069 goto out; 1070 1070 } 1071 1071 ndelay(SPINUNIT); ··· 1075 1075 /* 1076 1076 * Cache the global no_way_out state. 
1077 1077 */ 1078 - *no_way_out = arch_atomic_read(&global_nwo); 1078 + *no_way_out = raw_atomic_read(&global_nwo); 1079 1079 1080 1080 ret = order; 1081 1081
+1 -1
arch/x86/kernel/nmi.c
··· 496 496 */ 497 497 sev_es_nmi_complete(); 498 498 if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) 499 - arch_atomic_long_inc(&nsp->idt_calls); 499 + raw_atomic_long_inc(&nsp->idt_calls); 500 500 501 501 if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) 502 502 return;
+2 -2
arch/x86/kernel/pvclock.c
··· 101 101 * updating at the same time, and one of them could be slightly behind, 102 102 * making the assumption that last_value always go forward fail to hold. 103 103 */ 104 - last = arch_atomic64_read(&last_value); 104 + last = raw_atomic64_read(&last_value); 105 105 do { 106 106 if (ret <= last) 107 107 return last; 108 - } while (!arch_atomic64_try_cmpxchg(&last_value, &last, ret)); 108 + } while (!raw_atomic64_try_cmpxchg(&last_value, &last, ret)); 109 109 110 110 return ret; 111 111 }
+1 -1
arch/x86/kvm/x86.c
··· 13155 13155 13156 13156 bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm) 13157 13157 { 13158 - return arch_atomic_read(&kvm->arch.assigned_device_count); 13158 + return raw_atomic_read(&kvm->arch.assigned_device_count); 13159 13159 } 13160 13160 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device); 13161 13161
+6 -6
include/asm-generic/bitops/atomic.h
··· 15 15 arch_set_bit(unsigned int nr, volatile unsigned long *p) 16 16 { 17 17 p += BIT_WORD(nr); 18 - arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); 18 + raw_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p); 19 19 } 20 20 21 21 static __always_inline void 22 22 arch_clear_bit(unsigned int nr, volatile unsigned long *p) 23 23 { 24 24 p += BIT_WORD(nr); 25 - arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); 25 + raw_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p); 26 26 } 27 27 28 28 static __always_inline void 29 29 arch_change_bit(unsigned int nr, volatile unsigned long *p) 30 30 { 31 31 p += BIT_WORD(nr); 32 - arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); 32 + raw_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p); 33 33 } 34 34 35 35 static __always_inline int ··· 39 39 unsigned long mask = BIT_MASK(nr); 40 40 41 41 p += BIT_WORD(nr); 42 - old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p); 42 + old = raw_atomic_long_fetch_or(mask, (atomic_long_t *)p); 43 43 return !!(old & mask); 44 44 } 45 45 ··· 50 50 unsigned long mask = BIT_MASK(nr); 51 51 52 52 p += BIT_WORD(nr); 53 - old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p); 53 + old = raw_atomic_long_fetch_andnot(mask, (atomic_long_t *)p); 54 54 return !!(old & mask); 55 55 } 56 56 ··· 61 61 unsigned long mask = BIT_MASK(nr); 62 62 63 63 p += BIT_WORD(nr); 64 - old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p); 64 + old = raw_atomic_long_fetch_xor(mask, (atomic_long_t *)p); 65 65 return !!(old & mask); 66 66 } 67 67
+4 -4
include/asm-generic/bitops/lock.h
··· 25 25 if (READ_ONCE(*p) & mask) 26 26 return 1; 27 27 28 - old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); 28 + old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p); 29 29 return !!(old & mask); 30 30 } 31 31 ··· 41 41 arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p) 42 42 { 43 43 p += BIT_WORD(nr); 44 - arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); 44 + raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p); 45 45 } 46 46 47 47 /** ··· 63 63 p += BIT_WORD(nr); 64 64 old = READ_ONCE(*p); 65 65 old &= ~BIT_MASK(nr); 66 - arch_atomic_long_set_release((atomic_long_t *)p, old); 66 + raw_atomic_long_set_release((atomic_long_t *)p, old); 67 67 } 68 68 69 69 /** ··· 83 83 unsigned long mask = BIT_MASK(nr); 84 84 85 85 p += BIT_WORD(nr); 86 - old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); 86 + old = raw_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p); 87 87 return !!(old & BIT(7)); 88 88 } 89 89 #define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
+2 -2
include/linux/context_tracking.h
··· 119 119 */ 120 120 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void) 121 121 { 122 - return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX); 122 + return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX); 123 123 } 124 124 125 125 /* ··· 128 128 */ 129 129 static __always_inline unsigned long ct_state_inc(int incby) 130 130 { 131 - return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state)); 131 + return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state)); 132 132 } 133 133 134 134 static __always_inline bool warn_rcu_enter(void)
+1 -1
include/linux/context_tracking_state.h
··· 51 51 #ifdef CONFIG_CONTEXT_TRACKING_USER 52 52 static __always_inline int __ct_state(void) 53 53 { 54 - return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK; 54 + return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK; 55 55 } 56 56 #endif 57 57
+1 -1
include/linux/cpumask.h
··· 1071 1071 */ 1072 1072 static __always_inline unsigned int num_online_cpus(void) 1073 1073 { 1074 - return arch_atomic_read(&__num_online_cpus); 1074 + return raw_atomic_read(&__num_online_cpus); 1075 1075 } 1076 1076 #define num_possible_cpus() cpumask_weight(cpu_possible_mask) 1077 1077 #define num_present_cpus() cpumask_weight(cpu_present_mask)
+1 -1
include/linux/jump_label.h
··· 257 257 258 258 static __always_inline int static_key_count(struct static_key *key) 259 259 { 260 - return arch_atomic_read(&key->enabled); 260 + return raw_atomic_read(&key->enabled); 261 261 } 262 262 263 263 static __always_inline void jump_label_init(void)
+6 -6
kernel/context_tracking.c
··· 510 510 * In this we case we don't care about any concurrency/ordering. 511 511 */ 512 512 if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) 513 - arch_atomic_set(&ct->state, state); 513 + raw_atomic_set(&ct->state, state); 514 514 } else { 515 515 /* 516 516 * Even if context tracking is disabled on this CPU, because it's outside ··· 527 527 */ 528 528 if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) { 529 529 /* Tracking for vtime only, no concurrent RCU EQS accounting */ 530 - arch_atomic_set(&ct->state, state); 530 + raw_atomic_set(&ct->state, state); 531 531 } else { 532 532 /* 533 533 * Tracking for vtime and RCU EQS. Make sure we don't race ··· 535 535 * RCU only requires RCU_DYNTICKS_IDX increments to be fully 536 536 * ordered. 537 537 */ 538 - arch_atomic_add(state, &ct->state); 538 + raw_atomic_add(state, &ct->state); 539 539 } 540 540 } 541 541 } ··· 630 630 * In this we case we don't care about any concurrency/ordering. 631 631 */ 632 632 if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) 633 - arch_atomic_set(&ct->state, CONTEXT_KERNEL); 633 + raw_atomic_set(&ct->state, CONTEXT_KERNEL); 634 634 635 635 } else { 636 636 if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) { 637 637 /* Tracking for vtime only, no concurrent RCU EQS accounting */ 638 - arch_atomic_set(&ct->state, CONTEXT_KERNEL); 638 + raw_atomic_set(&ct->state, CONTEXT_KERNEL); 639 639 } else { 640 640 /* 641 641 * Tracking for vtime and RCU EQS. Make sure we don't race ··· 643 643 * RCU only requires RCU_DYNTICKS_IDX increments to be fully 644 644 * ordered. 645 645 */ 646 - arch_atomic_sub(state, &ct->state); 646 + raw_atomic_sub(state, &ct->state); 647 647 } 648 648 } 649 649 }
+1 -1
kernel/sched/clock.c
··· 287 287 clock = wrap_max(clock, min_clock); 288 288 clock = wrap_min(clock, max_clock); 289 289 290 - if (!arch_try_cmpxchg64(&scd->clock, &old_clock, clock)) 290 + if (!raw_try_cmpxchg64(&scd->clock, &old_clock, clock)) 291 291 goto again; 292 292 293 293 return clock;