locking/atomic: treewide: use raw_atomic*_<op>()
author Mark Rutland <mark.rutland@arm.com>
Mon, 5 Jun 2023 07:01:15 +0000 (08:01 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Mon, 5 Jun 2023 07:57:20 +0000 (09:57 +0200)
Now that we have raw_atomic*_<op>() definitions, there's no need to use
arch_atomic*_<op>() definitions outside of the low-level atomic
definitions.

Move treewide users of arch_atomic*_<op>() over to the equivalent
raw_atomic*_<op>().

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-19-mark.rutland@arm.com
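As background for the conversion below, a minimal sketch of the layering this series establishes. It is illustrative only (simplified bodies, not the actual generated headers under include/linux/atomic/): atomic_*() are the instrumented wrappers for general kernel code, raw_atomic_*() provide the same operations without sanitizer instrumentation, and arch_atomic_*() remain implementation details private to the atomic headers.

/*
 * Illustrative sketch of the layering (simplified; the real definitions
 * are generated into the include/linux/atomic/ headers):
 *
 *   atomic_read()      - instrumented, for general kernel code
 *   raw_atomic_read()  - no instrumentation, safe from noinstr code
 *   arch_atomic_read() - architecture implementation, private to the
 *                        atomic headers after this patch
 */
static __always_inline int raw_atomic_read(const atomic_t *v)
{
	return arch_atomic_read(v);		/* no sanitizer hooks */
}

static __always_inline int atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));	/* KASAN/KCSAN hooks */
	return raw_atomic_read(v);
}

The call sites converted here generally sit in noinstr or otherwise non-instrumented code, which is why they move to raw_atomic_*() rather than to the instrumented atomic_*() wrappers.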
14 files changed:
arch/powerpc/kernel/smp.c
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/nmi.c
arch/x86/kernel/pvclock.c
arch/x86/kvm/x86.c
include/asm-generic/bitops/atomic.h
include/asm-generic/bitops/lock.h
include/linux/context_tracking.h
include/linux/context_tracking_state.h
include/linux/cpumask.h
include/linux/jump_label.h
kernel/context_tracking.c
kernel/sched/clock.c

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 265801a3e94cfe883966bc7a52b3634c9f25ccc0..e8965f18686f0f78c341fea7a198a5f1fdc54d44 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -417,9 +417,9 @@ noinstr static void nmi_ipi_lock_start(unsigned long *flags)
 {
        raw_local_irq_save(*flags);
        hard_irq_disable();
-       while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
+       while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
                raw_local_irq_restore(*flags);
-               spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
+               spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
                raw_local_irq_save(*flags);
                hard_irq_disable();
        }
@@ -427,15 +427,15 @@ noinstr static void nmi_ipi_lock_start(unsigned long *flags)
 
 noinstr static void nmi_ipi_lock(void)
 {
-       while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
-               spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
+       while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
+               spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
 }
 
 noinstr static void nmi_ipi_unlock(void)
 {
        smp_mb();
-       WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1);
-       arch_atomic_set(&__nmi_ipi_lock, 0);
+       WARN_ON(raw_atomic_read(&__nmi_ipi_lock) != 1);
+       raw_atomic_set(&__nmi_ipi_lock, 0);
 }
 
 noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index f615e0cb6d932cf6399ccff4c8056a6a42fd5180..18f16e93838fe344fddfb3020d8fd40f6ee5b566 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1799,7 +1799,7 @@ struct bp_patching_desc *try_get_desc(void)
 {
        struct bp_patching_desc *desc = &bp_desc;
 
-       if (!arch_atomic_inc_not_zero(&desc->refs))
+       if (!raw_atomic_inc_not_zero(&desc->refs))
                return NULL;
 
        return desc;
@@ -1810,7 +1810,7 @@ static __always_inline void put_desc(void)
        struct bp_patching_desc *desc = &bp_desc;
 
        smp_mb__before_atomic();
-       arch_atomic_dec(&desc->refs);
+       raw_atomic_dec(&desc->refs);
 }
 
 static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 2eec60f50057a7204ea9e36e208d2637614de06d..ab156e6e712085d589a94482bec1600961037ee1 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1022,12 +1022,12 @@ static noinstr int mce_start(int *no_way_out)
        if (!timeout)
                return ret;
 
-       arch_atomic_add(*no_way_out, &global_nwo);
+       raw_atomic_add(*no_way_out, &global_nwo);
        /*
         * Rely on the implied barrier below, such that global_nwo
         * is updated before mce_callin.
         */
-       order = arch_atomic_inc_return(&mce_callin);
+       order = raw_atomic_inc_return(&mce_callin);
        arch_cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);
 
        /* Enable instrumentation around calls to external facilities */
@@ -1036,10 +1036,10 @@ static noinstr int mce_start(int *no_way_out)
        /*
         * Wait for everyone.
         */
-       while (arch_atomic_read(&mce_callin) != num_online_cpus()) {
+       while (raw_atomic_read(&mce_callin) != num_online_cpus()) {
                if (mce_timed_out(&timeout,
                                  "Timeout: Not all CPUs entered broadcast exception handler")) {
-                       arch_atomic_set(&global_nwo, 0);
+                       raw_atomic_set(&global_nwo, 0);
                        goto out;
                }
                ndelay(SPINUNIT);
@@ -1054,7 +1054,7 @@ static noinstr int mce_start(int *no_way_out)
                /*
                 * Monarch: Starts executing now, the others wait.
                 */
-               arch_atomic_set(&mce_executing, 1);
+               raw_atomic_set(&mce_executing, 1);
        } else {
                /*
                 * Subject: Now start the scanning loop one by one in
@@ -1062,10 +1062,10 @@ static noinstr int mce_start(int *no_way_out)
                 * This way when there are any shared banks it will be
                 * only seen by one CPU before cleared, avoiding duplicates.
                 */
-               while (arch_atomic_read(&mce_executing) < order) {
+               while (raw_atomic_read(&mce_executing) < order) {
                        if (mce_timed_out(&timeout,
                                          "Timeout: Subject CPUs unable to finish machine check processing")) {
-                               arch_atomic_set(&global_nwo, 0);
+                               raw_atomic_set(&global_nwo, 0);
                                goto out;
                        }
                        ndelay(SPINUNIT);
@@ -1075,7 +1075,7 @@ static noinstr int mce_start(int *no_way_out)
        /*
         * Cache the global no_way_out state.
         */
-       *no_way_out = arch_atomic_read(&global_nwo);
+       *no_way_out = raw_atomic_read(&global_nwo);
 
        ret = order;
 
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 776f4b1e395b5dd30e12af5447031ac9186c58c2..a0c551846b35f17627f2bccdff20ea8b1969c685 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -496,7 +496,7 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
         */
        sev_es_nmi_complete();
        if (IS_ENABLED(CONFIG_NMI_CHECK_CPU))
-               arch_atomic_long_inc(&nsp->idt_calls);
+               raw_atomic_long_inc(&nsp->idt_calls);
 
        if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
                return;
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 56acf53a782ad8e1b4193b5bdf33063a91ba0bcf..b3f81379c2fc06fd127728ea6ca6db65d5a7b34a 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -101,11 +101,11 @@ u64 __pvclock_clocksource_read(struct pvclock_vcpu_time_info *src, bool dowd)
         * updating at the same time, and one of them could be slightly behind,
         * making the assumption that last_value always go forward fail to hold.
         */
-       last = arch_atomic64_read(&last_value);
+       last = raw_atomic64_read(&last_value);
        do {
                if (ret <= last)
                        return last;
-       } while (!arch_atomic64_try_cmpxchg(&last_value, &last, ret));
+       } while (!raw_atomic64_try_cmpxchg(&last_value, &last, ret));
 
        return ret;
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ceb7c5e9cf9e9235a63cee5f909ba2a0ae41d5b3..ac6f6090681064b32d24ba1cd27c1ba0160e629a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13155,7 +13155,7 @@ EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
 
 bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
 {
-       return arch_atomic_read(&kvm->arch.assigned_device_count);
+       return raw_atomic_read(&kvm->arch.assigned_device_count);
 }
 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
 
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 71ab4ba9c25d189b2f018c4e5c6e46ee82ebd7a8..e076e079f6b2ec9a3bd88866ef50bf6343541743 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -15,21 +15,21 @@ static __always_inline void
 arch_set_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
-       arch_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
+       raw_atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
 static __always_inline void
 arch_clear_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
-       arch_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
+       raw_atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
 static __always_inline void
 arch_change_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
-       arch_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
+       raw_atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
 static __always_inline int
@@ -39,7 +39,7 @@ arch_test_and_set_bit(unsigned int nr, volatile unsigned long *p)
        unsigned long mask = BIT_MASK(nr);
 
        p += BIT_WORD(nr);
-       old = arch_atomic_long_fetch_or(mask, (atomic_long_t *)p);
+       old = raw_atomic_long_fetch_or(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
 
@@ -50,7 +50,7 @@ arch_test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
        unsigned long mask = BIT_MASK(nr);
 
        p += BIT_WORD(nr);
-       old = arch_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+       old = raw_atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
 
@@ -61,7 +61,7 @@ arch_test_and_change_bit(unsigned int nr, volatile unsigned long *p)
        unsigned long mask = BIT_MASK(nr);
 
        p += BIT_WORD(nr);
-       old = arch_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+       old = raw_atomic_long_fetch_xor(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
 
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 630f2f6b95956877e1b1b97ab39e41e33ae01edb..40913516e654c9548cf868dacda1e11f9fe1a135 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -25,7 +25,7 @@ arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
        if (READ_ONCE(*p) & mask)
                return 1;
 
-       old = arch_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+       old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
        return !!(old & mask);
 }
 
@@ -41,7 +41,7 @@ static __always_inline void
 arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
-       arch_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+       raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
 /**
@@ -63,7 +63,7 @@ arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
        p += BIT_WORD(nr);
        old = READ_ONCE(*p);
        old &= ~BIT_MASK(nr);
-       arch_atomic_long_set_release((atomic_long_t *)p, old);
+       raw_atomic_long_set_release((atomic_long_t *)p, old);
 }
 
 /**
@@ -83,7 +83,7 @@ static inline bool arch_clear_bit_unlock_is_negative_byte(unsigned int nr,
        unsigned long mask = BIT_MASK(nr);
 
        p += BIT_WORD(nr);
-       old = arch_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+       old = raw_atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
        return !!(old & BIT(7));
 }
 #define arch_clear_bit_unlock_is_negative_byte arch_clear_bit_unlock_is_negative_byte
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index d3cbb6c16babfa4cdf83776f7e4e2e67c0d4ead5..6e76b9dba00e77d9e28e6044f6684acf6ff28ee1 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -119,7 +119,7 @@ extern void ct_idle_exit(void);
  */
 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-       return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
+       return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
 }
 
 /*
@@ -128,7 +128,7 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
  */
 static __always_inline unsigned long ct_state_inc(int incby)
 {
-       return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
+       return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
 }
 
 static __always_inline bool warn_rcu_enter(void)
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index fdd537ea513ffa35562cc636900e0a0a5b7faaa4..bbff5f7f8803062fa3e38ee69b35fbc9de3efe79 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -51,7 +51,7 @@ DECLARE_PER_CPU(struct context_tracking, context_tracking);
 #ifdef CONFIG_CONTEXT_TRACKING_USER
 static __always_inline int __ct_state(void)
 {
-       return arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
+       return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
 }
 #endif
 
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index ca736b05ec7b056ebafae5f347913ce1fec97d2d..0d2e2a38b92d0d115f5d4d6a1d37b7400347ba0a 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -1071,7 +1071,7 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
  */
 static __always_inline unsigned int num_online_cpus(void)
 {
-       return arch_atomic_read(&__num_online_cpus);
+       return raw_atomic_read(&__num_online_cpus);
 }
 #define num_possible_cpus()    cpumask_weight(cpu_possible_mask)
 #define num_present_cpus()     cpumask_weight(cpu_present_mask)
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 4e968ebadce60ce9e1eebb5618eccd26d7875d12..f0a949b7c9733ce3209fb3adc561ac2f4d93d3ca 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -257,7 +257,7 @@ extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);
 
 static __always_inline int static_key_count(struct static_key *key)
 {
-       return arch_atomic_read(&key->enabled);
+       return raw_atomic_read(&key->enabled);
 }
 
 static __always_inline void jump_label_init(void)
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index a09f1c19336ae8eadbaa13a4cefe5ce8efd1e1ec..6ef0b35fc28c5a50434837f89d798060a58e26b9 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -510,7 +510,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
                         * In this we case we don't care about any concurrency/ordering.
                         */
                        if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
-                               arch_atomic_set(&ct->state, state);
+                               raw_atomic_set(&ct->state, state);
                } else {
                        /*
                         * Even if context tracking is disabled on this CPU, because it's outside
@@ -527,7 +527,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
                         */
                        if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
                                /* Tracking for vtime only, no concurrent RCU EQS accounting */
-                               arch_atomic_set(&ct->state, state);
+                               raw_atomic_set(&ct->state, state);
                        } else {
                                /*
                                 * Tracking for vtime and RCU EQS. Make sure we don't race
@@ -535,7 +535,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
                                 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
                                 * ordered.
                                 */
-                               arch_atomic_add(state, &ct->state);
+                               raw_atomic_add(state, &ct->state);
                        }
                }
        }
@@ -630,12 +630,12 @@ void noinstr __ct_user_exit(enum ctx_state state)
                         * In this we case we don't care about any concurrency/ordering.
                         */
                        if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE))
-                               arch_atomic_set(&ct->state, CONTEXT_KERNEL);
+                               raw_atomic_set(&ct->state, CONTEXT_KERNEL);
 
                } else {
                        if (!IS_ENABLED(CONFIG_CONTEXT_TRACKING_IDLE)) {
                                /* Tracking for vtime only, no concurrent RCU EQS accounting */
-                               arch_atomic_set(&ct->state, CONTEXT_KERNEL);
+                               raw_atomic_set(&ct->state, CONTEXT_KERNEL);
                        } else {
                                /*
                                 * Tracking for vtime and RCU EQS. Make sure we don't race
@@ -643,7 +643,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
                                 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
                                 * ordered.
                                 */
-                               arch_atomic_sub(state, &ct->state);
+                               raw_atomic_sub(state, &ct->state);
                        }
                }
        }
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index b5cc2b53464de7d430d35d2f9a9bedd730950d49..71443cff31f0dfd56830044dfc4fcf2be36ff55c 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -287,7 +287,7 @@ again:
        clock = wrap_max(clock, min_clock);
        clock = wrap_min(clock, max_clock);
 
-       if (!arch_try_cmpxchg64(&scd->clock, &old_clock, clock))
+       if (!raw_try_cmpxchg64(&scd->clock, &old_clock, clock))
                goto again;
 
        return clock;