diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 65f1569ac96e5e2772548f20e6f172c8ee2ad0b4..7047051ded40e3e9c3cc944fe0ce456bb6eaf897 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -35,77 +35,59 @@
  * strex/ldrex monitor on some implementations. The reason we can use it for
  * atomic_set() is the clrex or dummy strex done on every exception return.
  */
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
 #define atomic_set(v,i)        (((v)->counter) = (i))
 
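
The ACCESS_ONCE() form above behaves exactly like the old open-coded
volatile cast, but it derives the type with typeof(), so the same idiom
also covers the long counter read by atomic64_read() further down. A
minimal sketch of how it works, using the definition from
include/linux/compiler.h:

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

	/*
	 * Going through a volatile-qualified lvalue forces the compiler
	 * to emit a real load or store at every use instead of caching
	 * the value in a register, which is all that atomic_read() and
	 * atomic_set() need on this architecture.
	 */
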
 /*
  * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
  * store exclusive to ensure that these are atomic.  We may loop
  * to ensure that the update happens.
  */
-static inline void atomic_add(int i, atomic_t *v)
-{
-       unsigned long tmp;
-       int result;
-
-       asm volatile("// atomic_add\n"
-"1:    ldxr    %w0, %2\n"
-"      add     %w0, %w0, %w3\n"
-"      stxr    %w1, %w0, %2\n"
-"      cbnz    %w1, 1b"
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-       : "Ir" (i));
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-       unsigned long tmp;
-       int result;
-
-       asm volatile("// atomic_add_return\n"
-"1:    ldxr    %w0, %2\n"
-"      add     %w0, %w0, %w3\n"
-"      stlxr   %w1, %w0, %2\n"
-"      cbnz    %w1, 1b"
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-       : "Ir" (i)
-       : "memory");
-
-       smp_mb();
-       return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-       unsigned long tmp;
-       int result;
 
-       asm volatile("// atomic_sub\n"
-"1:    ldxr    %w0, %2\n"
-"      sub     %w0, %w0, %w3\n"
-"      stxr    %w1, %w0, %2\n"
-"      cbnz    %w1, 1b"
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-       : "Ir" (i));
+#define ATOMIC_OP(op, asm_op)                                          \
+static inline void atomic_##op(int i, atomic_t *v)                     \
+{                                                                      \
+       unsigned long tmp;                                              \
+       int result;                                                     \
+                                                                       \
+       asm volatile("// atomic_" #op "\n"                              \
+"1:    ldxr    %w0, %2\n"                                              \
+"      " #asm_op "     %w0, %w0, %w3\n"                                \
+"      stxr    %w1, %w0, %2\n"                                         \
+"      cbnz    %w1, 1b"                                                \
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+       : "Ir" (i));                                                    \
+}                                                                      \
+
+#define ATOMIC_OP_RETURN(op, asm_op)                                   \
+static inline int atomic_##op##_return(int i, atomic_t *v)             \
+{                                                                      \
+       unsigned long tmp;                                              \
+       int result;                                                     \
+                                                                       \
+       asm volatile("// atomic_" #op "_return\n"                       \
+"1:    ldxr    %w0, %2\n"                                              \
+"      " #asm_op "     %w0, %w0, %w3\n"                                \
+"      stlxr   %w1, %w0, %2\n"                                         \
+"      cbnz    %w1, 1b"                                                \
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+       : "Ir" (i)                                                      \
+       : "memory");                                                    \
+                                                                       \
+       smp_mb();                                                       \
+       return result;                                                  \
 }
 
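The two generators differ deliberately: ATOMIC_OP() emits a plain stxr
with no barrier and no "memory" clobber, so atomic_add()/atomic_sub()
imply no ordering, while ATOMIC_OP_RETURN() uses the release-flavoured
stlxr and follows it with smp_mb(), giving the *_return variants the
full-barrier semantics Documentation/atomic_ops.txt requires. A sketch
of a caller relying on that contract (struct and function names are
invented for illustration):

	struct obj {			/* hypothetical */
		atomic_t refcnt;
		/* ... payload ... */
	};

	static void put_obj(struct obj *obj)	/* hypothetical */
	{
		/*
		 * atomic_sub_return() is a full barrier: all of this
		 * CPU's earlier accesses to *obj are ordered before the
		 * decrement, so whichever CPU sees the count reach zero
		 * can free the object safely.
		 */
		if (atomic_sub_return(1, &obj->refcnt) == 0)
			kfree(obj);
	}
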
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-       unsigned long tmp;
-       int result;
+#define ATOMIC_OPS(op, asm_op)                                         \
+       ATOMIC_OP(op, asm_op)                                           \
+       ATOMIC_OP_RETURN(op, asm_op)
 
-       asm volatile("// atomic_sub_return\n"
-"1:    ldxr    %w0, %2\n"
-"      sub     %w0, %w0, %w3\n"
-"      stlxr   %w1, %w0, %2\n"
-"      cbnz    %w1, 1b"
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-       : "Ir" (i)
-       : "memory");
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
 
-       smp_mb();
-       return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
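
The ATOMIC_OPS() instantiations above generate exactly the four
functions this hunk removes, so the conversion is mechanical. Expanding
ATOMIC_OP(add, add) by hand, with # stringification and ## pasting
applied, gives back the original:

	static inline void atomic_add(int i, atomic_t *v)
	{
		unsigned long tmp;
		int result;

		asm volatile("// atomic_add\n"
	"1:	ldxr	%w0, %2\n"
	"	add	%w0, %w0, %w3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b"
		: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
		: "Ir" (i));
	}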
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
@@ -157,72 +139,53 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
  */
 #define ATOMIC64_INIT(i) { (i) }
 
-#define atomic64_read(v)       (*(volatile long *)&(v)->counter)
+#define atomic64_read(v)       ACCESS_ONCE((v)->counter)
 #define atomic64_set(v,i)      (((v)->counter) = (i))
 
-static inline void atomic64_add(u64 i, atomic64_t *v)
-{
-       long result;
-       unsigned long tmp;
-
-       asm volatile("// atomic64_add\n"
-"1:    ldxr    %0, %2\n"
-"      add     %0, %0, %3\n"
-"      stxr    %w1, %0, %2\n"
-"      cbnz    %w1, 1b"
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-       : "Ir" (i));
+#define ATOMIC64_OP(op, asm_op)                                                \
+static inline void atomic64_##op(long i, atomic64_t *v)                        \
+{                                                                      \
+       long result;                                                    \
+       unsigned long tmp;                                              \
+                                                                       \
+       asm volatile("// atomic64_" #op "\n"                            \
+"1:    ldxr    %0, %2\n"                                               \
+"      " #asm_op "     %0, %0, %3\n"                                   \
+"      stxr    %w1, %0, %2\n"                                          \
+"      cbnz    %w1, 1b"                                                \
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+       : "Ir" (i));                                                    \
+}                                                                      \
+
+#define ATOMIC64_OP_RETURN(op, asm_op)                                 \
+static inline long atomic64_##op##_return(long i, atomic64_t *v)       \
+{                                                                      \
+       long result;                                                    \
+       unsigned long tmp;                                              \
+                                                                       \
+       asm volatile("// atomic64_" #op "_return\n"                     \
+"1:    ldxr    %0, %2\n"                                               \
+"      " #asm_op "     %0, %0, %3\n"                                   \
+"      stlxr   %w1, %0, %2\n"                                          \
+"      cbnz    %w1, 1b"                                                \
+       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
+       : "Ir" (i)                                                      \
+       : "memory");                                                    \
+                                                                       \
+       smp_mb();                                                       \
+       return result;                                                  \
 }
 
-static inline long atomic64_add_return(long i, atomic64_t *v)
-{
-       long result;
-       unsigned long tmp;
+#define ATOMIC64_OPS(op, asm_op)                                       \
+       ATOMIC64_OP(op, asm_op)                                         \
+       ATOMIC64_OP_RETURN(op, asm_op)
 
-       asm volatile("// atomic64_add_return\n"
-"1:    ldxr    %0, %2\n"
-"      add     %0, %0, %3\n"
-"      stlxr   %w1, %0, %2\n"
-"      cbnz    %w1, 1b"
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-       : "Ir" (i)
-       : "memory");
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, sub)
 
-       smp_mb();
-       return result;
-}
-
-static inline void atomic64_sub(u64 i, atomic64_t *v)
-{
-       long result;
-       unsigned long tmp;
-
-       asm volatile("// atomic64_sub\n"
-"1:    ldxr    %0, %2\n"
-"      sub     %0, %0, %3\n"
-"      stxr    %w1, %0, %2\n"
-"      cbnz    %w1, 1b"
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-       : "Ir" (i));
-}
-
-static inline long atomic64_sub_return(long i, atomic64_t *v)
-{
-       long result;
-       unsigned long tmp;
-
-       asm volatile("// atomic64_sub_return\n"
-"1:    ldxr    %0, %2\n"
-"      sub     %0, %0, %3\n"
-"      stlxr   %w1, %0, %2\n"
-"      cbnz    %w1, 1b"
-       : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-       : "Ir" (i)
-       : "memory");
-
-       smp_mb();
-       return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
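
One visible side effect of the 64-bit conversion: the void ops now take
long, where the removed atomic64_add()/atomic64_sub() took u64, so all
four generated functions agree with the *_return prototypes. A short
usage sketch (the counter and helpers are invented for illustration):

	static atomic64_t bytes_sent = ATOMIC64_INIT(0);

	void account_send(size_t len)	/* hypothetical */
	{
		/* generated by ATOMIC64_OP(add, add); implies no barrier */
		atomic64_add(len, &bytes_sent);
	}

	long snapshot_sent(void)	/* hypothetical */
	{
		/* a single plain load, via ACCESS_ONCE() */
		return atomic64_read(&bytes_sent);
	}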
 
 static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
 {
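
The cmpxchg helpers bracketing these hunks are left untouched by the
patch. For reference, the usual caller pattern against the
atomic_cmpxchg() declared above is a compare-and-swap retry loop, the
same shape as the __atomic_add_unless() named in the second hunk's
context line (the helper below is invented for illustration):

	/* hypothetical: increment *v unless it is zero */
	static inline int inc_not_zero(atomic_t *v)
	{
		int old, new;

		do {
			old = atomic_read(v);
			if (old == 0)
				return 0;
			new = old + 1;
			/* retry if another CPU changed *v meanwhile */
		} while (atomic_cmpxchg(v, old, new) != old);

		return new;
	}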