/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * To ensure dependency ordering is preserved for the _relaxed and
 * _release atomics, an smp_read_barrier_depends() is unconditionally
 * inserted into the _relaxed variants, which are used to build the
 * barriered versions. To avoid redundant back-to-back fences, we can
 * define the _acquire and _fence versions explicitly.
 */
#define __atomic_op_acquire(op, args...)        op##_relaxed(args)
#define __atomic_op_fence                       __atomic_op_release

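/*
 * Illustrative expansion (a sketch, not part of this file): given the
 * definitions above, the generic <linux/atomic.h> layer builds e.g.
 *
 *      atomic_add_return_acquire(i, v)
 *        => __atomic_op_acquire(atomic_add_return, i, v)
 *        => atomic_add_return_relaxed(i, v)
 *
 * which is sufficient on Alpha because every _relaxed variant below
 * already ends with smp_read_barrier_depends().
 */
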
#define ATOMIC_INIT(i)          { (i) }
#define ATOMIC64_INIT(i)        { (i) }

#define atomic_read(v)          READ_ONCE((v)->counter)
#define atomic64_read(v)        READ_ONCE((v)->counter)

#define atomic_set(v,i)         WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)       WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
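
/*
 * A note on the loops below (added commentary): Alpha's load-locked/
 * store-conditional pairs (ldl_l/stl_c, ldq_l/stq_c) implement the
 * retry loop.  The store-conditional writes 1 back into its source
 * register on success and 0 if the lock flag was lost, so the
 * "beq %0,2f" branches out of line to retry when the update did not
 * land.
 */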

#define ATOMIC_OP(op, asm_op)                                           \
static __inline__ void atomic_##op(int i, atomic_t * v)                 \
{                                                                       \
        unsigned long temp;                                             \
        __asm__ __volatile__(                                           \
        "1:     ldl_l %0,%1\n"                                          \
        "       " #asm_op " %0,%2,%0\n"                                 \
        "       stl_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter)                                \
        :"Ir" (i), "m" (v->counter));                                   \
}                                                                       \

#define ATOMIC_OP_RETURN(op, asm_op)                                    \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)      \
{                                                                       \
        long temp, result;                                              \
        __asm__ __volatile__(                                           \
        "1:     ldl_l %0,%1\n"                                          \
        "       " #asm_op " %0,%3,%2\n"                                 \
        "       " #asm_op " %0,%3,%0\n"                                 \
        "       stl_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
        smp_read_barrier_depends();                                     \
        return result;                                                  \
}

#define ATOMIC_FETCH_OP(op, asm_op)                                     \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)       \
{                                                                       \
        long temp, result;                                              \
        __asm__ __volatile__(                                           \
        "1:     ldl_l %2,%1\n"                                          \
        "       " #asm_op " %2,%3,%0\n"                                 \
        "       stl_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
        smp_read_barrier_depends();                                     \
        return result;                                                  \
}
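
/*
 * Semantics of the three macro families above (added commentary), for
 * the "add" instantiation further down:
 *
 *      atomic_add(i, v)                 updates v, no return value
 *      atomic_add_return_relaxed(i, v)  returns the *new* value
 *      atomic_fetch_add_relaxed(i, v)   returns the *old* value
 *
 * all implemented as ldl_l/stl_c retry loops.
 */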

#define ATOMIC64_OP(op, asm_op)                                         \
static __inline__ void atomic64_##op(long i, atomic64_t * v)            \
{                                                                       \
        unsigned long temp;                                             \
        __asm__ __volatile__(                                           \
        "1:     ldq_l %0,%1\n"                                          \
        "       " #asm_op " %0,%2,%0\n"                                 \
        "       stq_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter)                                \
        :"Ir" (i), "m" (v->counter));                                   \
}                                                                       \

#define ATOMIC64_OP_RETURN(op, asm_op)                                  \
static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)  \
{                                                                       \
        long temp, result;                                              \
        __asm__ __volatile__(                                           \
        "1:     ldq_l %0,%1\n"                                          \
        "       " #asm_op " %0,%3,%2\n"                                 \
        "       " #asm_op " %0,%3,%0\n"                                 \
        "       stq_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
        smp_read_barrier_depends();                                     \
        return result;                                                  \
}

#define ATOMIC64_FETCH_OP(op, asm_op)                                   \
static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)   \
{                                                                       \
        long temp, result;                                              \
        __asm__ __volatile__(                                           \
        "1:     ldq_l %2,%1\n"                                          \
        "       " #asm_op " %2,%3,%0\n"                                 \
        "       stq_c %0,%1\n"                                          \
        "       beq %0,2f\n"                                            \
        ".subsection 2\n"                                               \
        "2:     br 1b\n"                                                \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
        smp_read_barrier_depends();                                     \
        return result;                                                  \
}

#define ATOMIC_OPS(op)                                                  \
        ATOMIC_OP(op, op##l)                                            \
        ATOMIC_OP_RETURN(op, op##l)                                     \
        ATOMIC_FETCH_OP(op, op##l)                                      \
        ATOMIC64_OP(op, op##q)                                          \
        ATOMIC64_OP_RETURN(op, op##q)                                   \
        ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
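
/*
 * Sketch of what the two instantiations above generate: for "add",
 * atomic_add(), atomic_add_return_relaxed(), atomic_fetch_add_relaxed()
 * and their atomic64_* counterparts, built on the addl/addq
 * instructions; likewise for "sub" with subl/subq.
 */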

#define atomic_add_return_relaxed       atomic_add_return_relaxed
#define atomic_sub_return_relaxed       atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed        atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed        atomic_fetch_sub_relaxed

#define atomic64_add_return_relaxed     atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed     atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed      atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed      atomic64_fetch_sub_relaxed

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)                                             \
        ATOMIC_OP(op, asm)                                              \
        ATOMIC_FETCH_OP(op, asm)                                        \
        ATOMIC64_OP(op, asm)                                            \
        ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
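
/*
 * Note (added commentary): "bic" (bit clear) and "bis" (bit set) are
 * the Alpha mnemonics for and-not and or, which is why the andnot and
 * or families can be generated directly from single instructions.
 */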

#define atomic_fetch_and_relaxed        atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed     atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed         atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed        atomic_fetch_xor_relaxed

#define atomic64_fetch_and_relaxed      atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed   atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed       atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed      atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
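
/*
 * Illustrative use (a sketch, hypothetical caller code): a
 * compare-and-swap loop equivalent in effect to atomic_inc():
 *
 *      int old = atomic_read(v);
 *      int tmp;
 *
 *      while ((tmp = atomic_cmpxchg(v, old, old + 1)) != old)
 *              old = tmp;
 */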

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, new, old;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l   %[old],%[mem]\n"
        "       cmpeq   %[old],%[u],%[c]\n"
        "       addl    %[old],%[a],%[new]\n"
        "       bne     %[c],2f\n"
        "       stl_c   %[new],%[mem]\n"
        "       beq     %[new],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
        : [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
        : "memory");
        smp_mb();
        return old;
}
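
/*
 * Sketch of how the generic layer consumes this primitive (as in the
 * <linux/atomic.h> of this era):
 *
 *      atomic_add_unless(v, a, u)  =>  __atomic_add_unless(v, a, u) != u
 *      atomic_inc_not_zero(v)      =>  atomic_add_unless(v, 1, 0)
 */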

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, tmp;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l   %[tmp],%[mem]\n"
        "       cmpeq   %[tmp],%[u],%[c]\n"
        "       addq    %[tmp],%[a],%[tmp]\n"
        "       bne     %[c],2f\n"
        "       stq_c   %[tmp],%[mem]\n"
        "       beq     %[tmp],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : [tmp] "=&r"(tmp), [c] "=&r"(c)
        : [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
        : "memory");
        smp_mb();
        return !c;
}

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        long old, tmp;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l   %[old],%[mem]\n"
        "       subq    %[old],1,%[tmp]\n"
        "       ble     %[old],2f\n"
        "       stq_c   %[tmp],%[mem]\n"
        "       beq     %[tmp],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br      1b\n"
        ".previous"
        : [old] "=&r"(old), [tmp] "=&r"(tmp)
        : [mem] "m"(*v)
        : "memory");
        smp_mb();
        return old - 1;
}
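
/*
 * Illustrative use (a sketch; "pool" and "available" are hypothetical
 * names): take one unit from a counter without letting it go negative.
 * A return value below zero means the counter was already zero and was
 * left untouched.
 *
 *      if (atomic64_dec_if_positive(&pool->available) < 0)
 *              return -EBUSY;
 */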

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

#endif /* _ALPHA_ATOMIC_H */