arm64: atomics: Undefine internal macros after use
author    Will Deacon <will@kernel.org>
          Thu, 29 Aug 2019 13:33:23 +0000 (14:33 +0100)
committer Will Deacon <will@kernel.org>
          Fri, 30 Aug 2019 10:18:37 +0000 (11:18 +0100)
We use a bunch of internal macros when constructing our atomic and
cmpxchg routines in order to save on boilerplate. Avoid exposing these
directly to users of the header files.
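
As an illustration of the pattern (a minimal sketch with made-up names,
not the arm64 code itself): a helper macro stamps out a family of
functions and is then #undef'd, so it can neither be invoked by nor
collide with code that includes the header.

	/* example.h -- hypothetical header, names are illustrative */
	#define EXAMPLE_OP(op, c_op)					\
	static inline void example_##op(int i, int *v)			\
	{								\
		*v = *v c_op i;						\
	}

	EXAMPLE_OP(add, +)	/* emits example_add() */
	EXAMPLE_OP(sub, -)	/* emits example_sub() */

	#undef EXAMPLE_OP	/* helper no longer visible to users */

Without the #undef, a later "#define EXAMPLE_OP" in an including file
would clash with the helper, and the internal macro would remain
usable from unrelated code.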

Reviewed-by: Andrew Murray <andrew.murray@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/atomic.h
arch/arm64/include/asm/cmpxchg.h

index 7c334337674db3f70d806cdf9a1ff5f501640989..916e5a6d545462b1e770dcd76e1289342297519f 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -32,6 +32,7 @@ ATOMIC_OP(atomic_add)
 ATOMIC_OP(atomic_and)
 ATOMIC_OP(atomic_sub)
 
+#undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, op)                                      \
 static inline int arch_##op##name(int i, atomic_t *v)                  \
@@ -54,6 +55,8 @@ ATOMIC_FETCH_OPS(atomic_fetch_sub)
 ATOMIC_FETCH_OPS(atomic_add_return)
 ATOMIC_FETCH_OPS(atomic_sub_return)
 
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_FETCH_OPS
 
 #define ATOMIC64_OP(op)                                                        \
 static inline void arch_##op(long i, atomic64_t *v)                    \
@@ -68,6 +71,7 @@ ATOMIC64_OP(atomic64_add)
 ATOMIC64_OP(atomic64_and)
 ATOMIC64_OP(atomic64_sub)
 
+#undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, op)                                    \
 static inline long arch_##op##name(long i, atomic64_t *v)              \
@@ -90,6 +94,9 @@ ATOMIC64_FETCH_OPS(atomic64_fetch_sub)
 ATOMIC64_FETCH_OPS(atomic64_add_return)
 ATOMIC64_FETCH_OPS(atomic64_sub_return)
 
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_FETCH_OPS
+
 static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        return __lse_ll_sc_body(atomic64_dec_if_positive, v);
index afaba73e0b2c0d9984d62975f5619e55a0ba4e63..a1398f2f9994fcdc0e110731feffa550ab765964 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -129,6 +129,8 @@ __CMPXCHG_CASE(mb_, 16)
 __CMPXCHG_CASE(mb_, 32)
 __CMPXCHG_CASE(mb_, 64)
 
+#undef __CMPXCHG_CASE
+
 #define __CMPXCHG_DBL(name)                                            \
 static inline long __cmpxchg_double##name(unsigned long old1,          \
                                         unsigned long old2,            \
@@ -143,6 +145,8 @@ static inline long __cmpxchg_double##name(unsigned long old1,               \
 __CMPXCHG_DBL(   )
 __CMPXCHG_DBL(_mb)
 
+#undef __CMPXCHG_DBL
+
 #define __CMPXCHG_GEN(sfx)                                             \
 static inline unsigned long __cmpxchg##sfx(volatile void *ptr,         \
                                           unsigned long old,           \