/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))
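
/*
 * The ATOMIC_OP/ATOMIC_FETCH_OP generators below implement each
 * operation as a compare-and-exchange loop: read the counter, compute
 * the new value, and retry until ia64_cmpxchg() confirms that no other
 * CPU updated the counter in the meantime.
 */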

#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)
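
/*
 * ATOMIC_OPS(add, +) above generates ia64_atomic_add() (returning the
 * new value) and ia64_atomic_fetch_add() (returning the old value);
 * likewise for sub.  For example:
 *
 *	atomic_t v = ATOMIC_INIT(1);
 *	ia64_atomic_add(2, &v);		returns 3, v.counter == 3
 *	ia64_atomic_fetch_add(2, &v);	returns 3, v.counter == 5
 */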

#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i)	__builtin_constant_p(i) ?		\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0
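
/*
 * Only increments that the ia64 fetchadd4/fetchadd8 instructions can
 * encode as an immediate (-16, -8, -4, -1, 1, 4, 8, 16) qualify for
 * the single-instruction fast path; anything else falls back to the
 * cmpxchg loop in ia64_atomic_add()/ia64_atomic_sub().
 */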

#define atomic_add_return(i, v)						\
({									\
	int __i = (i);							\
	static const int __ia64_atomic_p = __ia64_atomic_const(i);	\
	__ia64_atomic_p ? ia64_fetch_and_add(__i, &(v)->counter) :	\
				ia64_atomic_add(__i, v);		\
})

#define atomic_sub_return(i, v)						\
({									\
	int __i = (i);							\
	static const int __ia64_atomic_p = __ia64_atomic_const(i);	\
	__ia64_atomic_p ? ia64_fetch_and_add(-__i, &(v)->counter) :	\
				ia64_atomic_sub(__i, v);		\
})
#else
#define atomic_add_return(i, v)	ia64_atomic_add(i, v)
#define atomic_sub_return(i, v)	ia64_atomic_sub(i, v)
#endif
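
/*
 * The #else fallback presumably exists because __builtin_constant_p()
 * and dead-branch elimination cannot be relied upon without
 * optimization, so -O0 builds always take the cmpxchg-based helpers.
 */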

#define atomic_fetch_add(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define atomic_fetch_sub(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})
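
/*
 * The fetch variants return the value the counter held before the
 * operation, e.g.:
 *
 *	atomic_t v = ATOMIC_INIT(5);
 *	int old = atomic_fetch_sub(2, &v);	old == 5, v.counter == 3
 */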

ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)
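
/*
 * atomic_and()/atomic_or()/atomic_xor() are the value-discarding forms
 * of the fetch variants above, e.g. atomic_and(~0x4, &v) clears bit 2
 * of the counter without reporting the old value.
 */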

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op)						\
static __inline__ long							\
ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ long							\
ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v)			\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic64_fetch_add(i,v)						\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define atomic64_fetch_sub(i,v)						\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
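
/*
 * atomic_cmpxchg() returns the value the counter held at the time of
 * the call; the exchange took place iff that value equals 'old', e.g.:
 *
 *	if (atomic_cmpxchg(&v, 0, 1) == 0)
 *		... we won the race and moved v from 0 to 1 ...
 */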

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
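
/*
 * __atomic_add_unless() returns the value *v held before any add; the
 * add happened iff that value differs from u.  E.g. an inc-not-zero:
 *
 *	if (__atomic_add_unless(&v, 1, 0) != 0)
 *		... v was non-zero and has been incremented ...
 */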

static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
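
/*
 * atomic64_dec_if_positive() returns the decremented value on success
 * and a negative value when the counter was already <= 0 (in which
 * case it is left untouched), e.g. with a hypothetical counter:
 *
 *	if (atomic64_dec_if_positive(&ctx->budget) < 0)
 *		... budget exhausted, nothing was decremented ...
 */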

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
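
/*
 * atomic_dec_and_test() returns true when the new value is zero, which
 * is the usual reference-count release pattern (obj/refcnt below are
 * hypothetical):
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		kfree(obj);
 */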

#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#endif /* _ASM_IA64_ATOMIC_H */