/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
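
/*
 * Illustrative usage sketch (not part of the original header): how a caller
 * typically declares and uses an atomic_t with the accessors above and the
 * atomic_add() operation generated further down. The counter name and the
 * helpers are hypothetical.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	void record_event(void)
 *	{
 *		atomic_add(1, &nr_events);
 *	}
 *
 *	int snapshot_events(void)
 *	{
 *		return atomic_read(&nr_events);
 *	}
 */
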
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
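
/*
 * Illustrative only: the exclusive-access loops emitted by the macros below
 * behave roughly like this C sketch. load_exclusive()/store_exclusive() are
 * hypothetical helpers standing in for the ldrex/strex instructions; the
 * real implementations are the inline assembly sequences that follow.
 *
 *	do {
 *		old = load_exclusive(&v->counter);		// ldrex
 *		new = old op i;					// add, sub, and, ...
 *		failed = store_exclusive(new, &v->counter);	// strex
 *	} while (failed);					// lost the reservation, retry
 */
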
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"		\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
	return result;							\
}

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
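
/*
 * Illustrative only (not part of the original header): the usual caller-side
 * loop built on a cmpxchg primitive. The "clamped increment" logic and the
 * limit variable are hypothetical; only atomic_read()/atomic_cmpxchg() are
 * assumed from this file.
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(v);
 *		new = old < limit ? old + 1 : old;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */
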
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic_fetch_add_unless		atomic_fetch_add_unless
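
/*
 * Illustrative only: atomic_fetch_add_unless() is the building block for
 * "increment unless it is zero" style helpers. A minimal sketch of such a
 * helper (the name my_inc_not_zero is hypothetical):
 *
 *	static inline bool my_inc_not_zero(atomic_t *v)
 *	{
 *		return atomic_fetch_add_unless(v, 1, 0) != 0;
 *	}
 */
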
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
	return val;							\
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#define atomic_fetch_andnot		atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
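
/*
 * Illustrative only: because atomic_xchg() returns the previous value, a
 * simple "claim exactly once" pattern can be written as below. The claimed
 * flag and the surrounding logic are hypothetical.
 *
 *	static atomic_t claimed = ATOMIC_INIT(0);
 *
 *	if (atomic_xchg(&claimed, 1) == 0) {
 *		// first (and only) caller to get here
 *	}
 */
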
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}

#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline long long							\
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline long long							\
atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)		\
{									\
	long long result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
	long long oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	prefetchw(&ptr->counter);
	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
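
/*
 * Illustrative only: atomic64_dec_if_positive() stores the decrement only
 * when the result stays non-negative and returns that (possibly negative)
 * result, so a "take one token if available" helper could look like this.
 * The helper name and the token-pool idea are hypothetical.
 *
 *	static inline bool take_token(atomic64_t *pool)
 *	{
 *		return atomic64_dec_if_positive(pool) >= 0;
 *	}
 */
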
static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
						  long long u)
{
	long long oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */

#endif /* __ASM_ARM_ATOMIC_H */