/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))

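/*
 * Typical usage, for illustration only:
 *
 *	static atomic_t refs = ATOMIC_INIT(0);
 *
 *	atomic_set(&refs, 1);
 *	if (atomic_read(&refs) > 0)
 *		...
 */
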
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

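/*
 * E.g. the store-conditional failure path in the loops below lays out
 * roughly as:
 *
 *	1:	ldl_l	...		# main line
 *		...
 *		stl_c	%0,%1
 *		beq	%0,2f		# forward branch: predicted not-taken
 *		...			# fall-through, no branch on success
 *
 *	.subsection 2			# emitted after the main .text
 *	2:	br	1b		# rare retry, kept out of line
 */
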
#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int i, atomic_t * v)			\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}

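/*
 * For example, ATOMIC_OP(add, addl) expands to roughly:
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long temp;
 *		__asm__ __volatile__(
 *		"1:	ldl_l %0,%1\n"		load-locked v->counter
 *		"	addl %0,%2,%0\n"	temp = temp + i
 *		"	stl_c %0,%1\n"		store-conditional
 *		"	beq %0,2f\n"		branch out to retry on failure
 *		...);
 *	}
 */
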
#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}

#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, asm_op)					\
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	return result;							\
}

#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

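/*
 * The two instantiations above generate, among others:
 *
 *	atomic_add(), atomic_add_return_relaxed(), atomic_fetch_add_relaxed()
 *	atomic_sub(), atomic_sub_return_relaxed(), atomic_fetch_sub_relaxed()
 *
 * plus the corresponding atomic64_*() variants.
 */
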
#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)						\
	ATOMIC_OP(op, asm)						\
	ATOMIC_FETCH_OP(op, asm)					\
	ATOMIC64_OP(op, asm)						\
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

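/*
 * These generate atomic_and(), atomic_andnot() (via "bic"), atomic_or()
 * (via "bis") and atomic_xor(), their atomic64_*() counterparts and the
 * *_fetch_*_relaxed() forms -- note there are no *_return variants here.
 */
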
#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

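/*
 * A cmpxchg() loop is the usual way to build an atomic operation that is
 * not provided above.  Illustrative sketch only:
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(v);
 *		new = old | flag;	(any pure function of old)
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */
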
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}

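/*
 * E.g. an increment-unless-zero sketch built on the primitive above
 * ("obj" is a stand-in for the caller's object; cf. the generic
 * atomic_inc_not_zero()):
 *
 *	if (__atomic_add_unless(&obj->count, 1, 0) != 0)
 *		... reference taken; the old count was non-zero ...
 */
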
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[tmp],%[mem]\n"
	"	cmpeq	%[tmp],%[u],%[c]\n"
	"	addq	%[tmp],%[a],%[tmp]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [tmp] "=&r"(tmp), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return !c;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}

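/*
 * E.g. claiming one unit of a counted resource, for illustration
 * ("avail" is a stand-in for the caller's counter):
 *
 *	if (atomic64_dec_if_positive(&avail) < 0)
 *		... none left; the counter was not decremented ...
 */
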
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

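/*
 * Typical reference-count usage of the test variants (sketch; "obj" and
 * kfree() stand in for the caller's object and release path):
 *
 *	if (atomic_dec_and_test(&obj->refcount))
 *		kfree(obj);
 */
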
#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#endif /* _ALPHA_ATOMIC_H */