Merge branch 'locking/atomics' into locking/core, to pick up WIP commits
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index af7b990054536c0054fe71f8c8cae080f266f502..e321293e0c8955c1bf99a0959e7b8697ae95ed83 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -39,7 +39,7 @@
 
 #define ATOMIC_OP(op, asm_op)                                          \
 __LL_SC_INLINE void                                                    \
-__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))                                \
+__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v))                   \
 {                                                                      \
        unsigned long tmp;                                              \
        int result;                                                     \
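
For reference, with this rename in place, ATOMIC_OP(add, add) expands to roughly the function below. The LL/SC loop body sits in lines outside the hunks shown here, so it is reconstructed from the visible operand constraints and the usual arm64 ldxr/stxr pattern; the __LL_SC_INLINE/__LL_SC_PREFIX/__LL_SC_EXPORT wrappers are collapsed to a plain static inline for readability.

static inline void arch_atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
	"	prfm	pstl1strm, %2\n"	/* prefetch the counter for store */
	"1:	ldxr	%w0, %2\n"		/* load-exclusive v->counter */
	"	add	%w0, %w0, %w3\n"	/* apply the operation */
	"	stxr	%w1, %w0, %2\n"		/* store-exclusive, tmp holds the status */
	"	cbnz	%w1, 1b"		/* retry if the exclusive store failed */
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}
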
@@ -53,11 +53,11 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))                             \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
 }                                                                      \
-__LL_SC_EXPORT(atomic_##op);
+__LL_SC_EXPORT(arch_atomic_##op);
 
 #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)           \
 __LL_SC_INLINE int                                                     \
-__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))         \
+__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v))    \
 {                                                                      \
        unsigned long tmp;                                              \
        int result;                                                     \
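
The _return variants are parameterised by name/mb/acq/rel/cl, which select the barrier flavour (relaxed, acquire, release, fully ordered). Taking the fully ordered case as an example, which the ATOMIC_OPS block (outside these hunks) instantiates roughly as ATOMIC_OP_RETURN(, dmb ish, , l, "memory", add, add), the generated function looks approximately like this; treat the exact asm as an illustration, since those lines are not part of the diff shown.

static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%w0, %2\n"		/* acq == "": plain load-exclusive */
	"	add	%w0, %w0, %w3\n"
	"	stlxr	%w1, %w0, %2\n"		/* rel == l: store-release exclusive */
	"	cbnz	%w1, 1b\n"
	"	dmb	ish"			/* mb: full barrier once the store succeeds */
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");				/* cl */

	return result;				/* _return variants give back the new value */
}
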
@@ -75,11 +75,11 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))              \
                                                                        \
        return result;                                                  \
 }                                                                      \
-__LL_SC_EXPORT(atomic_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic_##op##_return##name);
 
 #define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)            \
 __LL_SC_INLINE int                                                     \
-__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v))            \
+__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v))       \
 {                                                                      \
        unsigned long tmp;                                              \
        int val, result;                                                \
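
The _fetch_ variants differ from the _return variants only in what they hand back: the value the counter held before the operation rather than after it. Ignoring ordering and the LL/SC retry loop, arch_atomic_fetch_add() behaves like the hypothetical helper below (purely illustrative; the real function is the asm loop generated by this macro).

/* Illustrative semantics only -- not the real implementation. */
static inline int fetch_add_semantics(int i, atomic_t *v)
{
	int old = v->counter;	/* value before the update ...         */
	v->counter = old + i;	/* ... both steps happening atomically */
	return old;		/* fetch_* returns the old value       */
}
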
@@ -97,7 +97,7 @@ __LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v))           \
                                                                        \
        return result;                                                  \
 }                                                                      \
-__LL_SC_EXPORT(atomic_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic_fetch_##op##name);
 
 #define ATOMIC_OPS(...)                                                        \
        ATOMIC_OP(__VA_ARGS__)                                          \
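
The reason for the arch_ prefix is that the unprefixed atomic_*() API can then be supplied by the generic instrumented wrappers in include/asm-generic/atomic-instrumented.h, which add KASAN checks before calling into the architecture code. A minimal sketch of that wrapper layer, simplified from the generic header, looks like:

static __always_inline void atomic_add(int i, atomic_t *v)
{
	kasan_check_write(v, sizeof(*v));	/* instrumentation hook */
	arch_atomic_add(i, v);			/* the arch op renamed above */
}
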
@@ -133,7 +133,7 @@ ATOMIC_OPS(xor, eor)
 
 #define ATOMIC64_OP(op, asm_op)                                                \
 __LL_SC_INLINE void                                                    \
-__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))                   \
+__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v))              \
 {                                                                      \
        long result;                                                    \
        unsigned long tmp;                                              \
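
The ATOMIC64_* macros mirror the 32-bit ones, but take long/atomic64_t operands and use the full 64-bit registers in the asm templates (%0 rather than %w0), with only the store-exclusive status flag staying 32-bit. Reconstructed the same way as above, ATOMIC64_OP(add, add) now yields roughly:

static inline void arch_atomic64_add(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add\n"
	"	prfm	pstl1strm, %2\n"
	"1:	ldxr	%0, %2\n"		/* 64-bit load-exclusive */
	"	add	%0, %0, %3\n"
	"	stxr	%w1, %0, %2\n"		/* status flag is still 32-bit */
	"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}
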
@@ -147,11 +147,11 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))                      \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
 }                                                                      \
-__LL_SC_EXPORT(atomic64_##op);
+__LL_SC_EXPORT(arch_atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)         \
 __LL_SC_INLINE long                                                    \
-__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))    \
+__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
 {                                                                      \
        long result;                                                    \
        unsigned long tmp;                                              \
@@ -169,11 +169,11 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))       \
                                                                        \
        return result;                                                  \
 }                                                                      \
-__LL_SC_EXPORT(atomic64_##op##_return##name);
+__LL_SC_EXPORT(arch_atomic64_##op##_return##name);
 
 #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)          \
 __LL_SC_INLINE long                                                    \
-__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v))       \
+__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v))  \
 {                                                                      \
        long result, val;                                               \
        unsigned long tmp;                                              \
@@ -191,7 +191,7 @@ __LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v))    \
                                                                        \
        return result;                                                  \
 }                                                                      \
-__LL_SC_EXPORT(atomic64_fetch_##op##name);
+__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);
 
 #define ATOMIC64_OPS(...)                                              \
        ATOMIC64_OP(__VA_ARGS__)                                        \
@@ -226,7 +226,7 @@ ATOMIC64_OPS(xor, eor)
 #undef ATOMIC64_OP
 
 __LL_SC_INLINE long
-__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
 {
        long result;
        unsigned long tmp;
@@ -246,7 +246,7 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
 
        return result;
 }
-__LL_SC_EXPORT(atomic64_dec_if_positive);
+__LL_SC_EXPORT(arch_atomic64_dec_if_positive);
 
 #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl)             \
 __LL_SC_INLINE u##sz                                                   \
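
Finally, arch_atomic64_dec_if_positive(), renamed a few hunks above, decrements the counter only while the result stays non-negative; if the decrement would go below zero it leaves the counter alone and returns the (negative) value it would have produced. A C-level equivalent is sketched below for clarity; it assumes arch_atomic64_read()/arch_atomic64_cmpxchg() are available once this series lands, and the real code remains the single LL/SC loop in the hunk above.

static inline long arch_atomic64_dec_if_positive_sketch(atomic64_t *v)
{
	long old, dec;

	do {
		old = arch_atomic64_read(v);
		dec = old - 1;
		if (dec < 0)			/* would drop below zero: */
			return dec;		/* leave v untouched, report it */
	} while (arch_atomic64_cmpxchg(v, old, dec) != old);

	return dec;				/* new, non-negative value */
}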