#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee for us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

#define ATOMIC_INIT(i)          { (i) }
#define ATOMIC64_INIT(i)        { (i) }

#define atomic_read(v)          READ_ONCE((v)->counter)
#define atomic64_read(v)        READ_ONCE((v)->counter)

#define atomic_set(v,i)         WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)       WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op, asm_op)                                           \
static __inline__ void atomic_##op(int i, atomic_t * v)                 \
{                                                                       \
        unsigned long temp;                                             \
        __asm__ __volatile__(                                           \
        "1:     ldl_l %0,%1\n"                                          \
        "       " #asm_op " %0,%2,%0\n"                                 \
        "       stl_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter)                                \
        :"Ir" (i), "m" (v->counter));                                   \
}

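/*
 * For illustration, ATOMIC_OP(add, addl) above expands to a classic
 * Alpha LL/SC retry loop (a sketch; operand numbers match the
 * constraints):
 *
 *	1:	ldl_l	%0,%1		// load-locked v->counter into temp
 *		addl	%0,%2,%0	// temp += i
 *		stl_c	%0,%1		// store-conditional; temp = success flag
 *		beq	%0,2f		// on rare failure, branch forward...
 *	.subsection 2
 *	2:	br	1b		// ...to an out-of-line retry
 *	.previous
 *
 * Keeping the retry out of line is what "proper branch prediction for
 * the main line" above refers to.
 */
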
#define ATOMIC_OP_RETURN(op, asm_op)                                    \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)      \
{                                                                       \
        long temp, result;                                              \
        __asm__ __volatile__(                                           \
        "1:     ldl_l %0,%1\n"                                          \
        "       " #asm_op " %0,%3,%2\n"                                 \
        "       " #asm_op " %0,%3,%0\n"                                 \
        "       stl_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
        return result;                                                  \
}
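
/*
 * Note that #asm_op is applied twice above: once into "result" (the
 * value the caller sees) and once into "temp" (the value handed to
 * stl_c, which is then overwritten by the success flag).  Both compute
 * the same new value, so atomic_add_return_relaxed(), for example,
 * returns v->counter *after* the addition.
 */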

#define ATOMIC_FETCH_OP(op, asm_op)                                     \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)       \
{                                                                       \
        long temp, result;                                              \
        __asm__ __volatile__(                                           \
        "1:     ldl_l %2,%1\n"                                          \
        "       " #asm_op " %2,%3,%0\n"                                 \
        "       stl_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
        return result;                                                  \
}
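
/*
 * Unlike the _return form, the fetch form loads the old value straight
 * into "result" and builds the updated value in "temp", so
 * atomic_fetch_add_relaxed(), for example, returns v->counter *before*
 * the addition.
 */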

#define ATOMIC64_OP(op, asm_op)                                         \
static __inline__ void atomic64_##op(long i, atomic64_t * v)            \
{                                                                       \
        unsigned long temp;                                             \
        __asm__ __volatile__(                                           \
        "1:     ldq_l %0,%1\n"                                          \
        "       " #asm_op " %0,%2,%0\n"                                 \
        "       stq_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter)                                \
        :"Ir" (i), "m" (v->counter));                                   \
}

#define ATOMIC64_OP_RETURN(op, asm_op)                                  \
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)  \
{                                                                       \
        long temp, result;                                              \
        __asm__ __volatile__(                                           \
        "1:     ldq_l %0,%1\n"                                          \
        "       " #asm_op " %0,%3,%2\n"                                 \
        "       " #asm_op " %0,%3,%0\n"                                 \
        "       stq_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
        return result;                                                  \
}

#define ATOMIC64_FETCH_OP(op, asm_op)                                   \
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)   \
{                                                                       \
        long temp, result;                                              \
        __asm__ __volatile__(                                           \
        "1:     ldq_l %2,%1\n"                                          \
        "       " #asm_op " %2,%3,%0\n"                                 \
        "       stq_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
        return result;                                                  \
}

#define ATOMIC_OPS(op)                                                  \
        ATOMIC_OP(op, op##l)                                            \
        ATOMIC_OP_RETURN(op, op##l)                                     \
        ATOMIC_FETCH_OP(op, op##l)                                      \
        ATOMIC64_OP(op, op##q)                                          \
        ATOMIC64_OP_RETURN(op, op##q)                                   \
        ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

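/*
 * A minimal usage sketch (hypothetical caller, not part of this
 * header):
 *
 *	static atomic_t nr_active = ATOMIC_INIT(0);
 *
 *	atomic_add(3, &nr_active);		  // no return value
 *	int n = atomic_add_return(3, &nr_active); // new value, fully ordered
 *	int o = atomic_fetch_add(3, &nr_active);  // old value, fully ordered
 *
 * Only the _relaxed variants are defined here; the fully-ordered names
 * used above are built from them by the generic <linux/atomic.h> code.
 */
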
#define atomic_add_return_relaxed       atomic_add_return_relaxed
#define atomic_sub_return_relaxed       atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed        atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed        atomic_fetch_sub_relaxed

#define atomic64_add_return_relaxed     atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed     atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed      atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed      atomic64_fetch_sub_relaxed

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)                                             \
        ATOMIC_OP(op, asm)                                              \
        ATOMIC_FETCH_OP(op, asm)                                        \
        ATOMIC64_OP(op, asm)                                            \
        ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

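/*
 * Alpha spells two of these differently from their generic names:
 * "bic" (bit clear) provides andnot and "bis" (bit set) provides or.
 * A hypothetical flag-clearing caller (FLAG_PENDING and obj are
 * illustrative names only):
 *
 *	atomic_andnot(FLAG_PENDING, &obj->flags);  // flags &= ~FLAG_PENDING
 */
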
#define atomic_fetch_and_relaxed        atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed     atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed         atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed        atomic_fetch_xor_relaxed

#define atomic64_fetch_and_relaxed      atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed   atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed       atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed      atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

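/*
 * atomic_cmpxchg() supports the usual compare-and-swap retry pattern.
 * A sketch (clamped_inc() is a hypothetical helper, not part of this
 * header):
 *
 *	static int clamped_inc(atomic_t *v, int max)
 *	{
 *		int old, cur = atomic_read(v);
 *
 *		while (cur < max) {
 *			old = atomic_cmpxchg(v, cur, cur + 1);
 *			if (old == cur)
 *				return 1;	// won the race
 *			cur = old;		// lost it; retry with fresh value
 *		}
 *		return 0;			// already at max
 *	}
 */
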
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, new, old;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l   %[old],%[mem]\n"
        "       cmpeq   %[old],%[u],%[c]\n"
        "       addl    %[old],%[a],%[new]\n"
        "       bne     %[c],2f\n"
        "       stl_c   %[new],%[mem]\n"
        "       beq     %[new],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
        : [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
        : "memory");
        smp_mb();
        return old;
}
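
/*
 * Typical use (hypothetical caller): take a reference only while the
 * counter has not already dropped to zero:
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) != 0)
 *		...		// got a reference
 *
 * This is the building block the generic atomic_add_unless() and
 * atomic_inc_not_zero() are made from.
 */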

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, tmp;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l   %[tmp],%[mem]\n"
        "       cmpeq   %[tmp],%[u],%[c]\n"
        "       addq    %[tmp],%[a],%[tmp]\n"
        "       bne     %[c],2f\n"
        "       stq_c   %[tmp],%[mem]\n"
        "       beq     %[tmp],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : [tmp] "=&r"(tmp), [c] "=&r"(c)
        : [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
        : "memory");
        smp_mb();
        return !c;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * Returns the old value of @v minus 1, even if @v was not
 * actually decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        long old, tmp;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l   %[old],%[mem]\n"
        "       subq    %[old],1,%[tmp]\n"
        "       ble     %[old],2f\n"
        "       stq_c   %[tmp],%[mem]\n"
        "       beq     %[tmp],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : [old] "=&r"(old), [tmp] "=&r"(tmp)
        : [mem] "m"(*v)
        : "memory");
        smp_mb();
        return old - 1;
}
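
/*
 * A hypothetical consumer-side use: claim one unit of a resource
 * count without ever pushing it below zero:
 *
 *	if (atomic64_dec_if_positive(&pool->avail) >= 0)
 *		...		// claimed a unit
 *	else
 *		...		// count was <= 0 and was left untouched
 */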

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))
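
/*
 * These derived forms compose with the primitives above in the usual
 * way, e.g. a hypothetical put path:
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		release(obj);	// dropped the last reference
 */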

#endif /* _ALPHA_ATOMIC_H */