/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
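
/*
 * Added commentary (not from the original file): the ATOMIC_OP /
 * ATOMIC_OP_RETURN / ATOMIC_FETCH_OP templates below generate atomic_add(),
 * atomic_add_return(), atomic_fetch_add() and friends around an LLOCK/SCOND
 * retry loop: load-exclusive into a register, apply the op, store-conditional
 * back, and branch to the start if the store lost exclusivity.
 */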

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r" (val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
	  [i]	"ir" (i)						\
	: "cc");							\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r" (val)						\
	: [ctr]	"r" (&v->counter),					\
	  [i]	"ir" (i)						\
	: "cc");							\
									\
	smp_mb();							\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r" (val),						\
	  [orig] "=&r" (orig)						\
	: [ctr]	"r" (&v->counter),					\
	  [i]	"ir" (i)						\
	: "cc");							\
									\
	smp_mb();							\
	return orig;							\
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence (see the interleaving sketch below).
	 *
	 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

#endif
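
/*
 * Illustrative sketch only (not from the original file): the interleaving the
 * common locking protocol prevents. If atomic_set() stored directly while
 * another CPU was inside the lock-based R-M-W emulation, its store would be
 * silently lost:
 *
 *	CPU0 (emulated atomic_add)	CPU1
 *	atomic_ops_lock(flags);
 *	tmp = v->counter;
 *					v->counter = 0;    // raw, unlocked set
 *	v->counter = tmp + 1;		// overwrites CPU1's store
 *	atomic_ops_unlock(flags);
 *
 * Hence atomic_set() above takes the same lock as the emulated R-M-W ops.
 */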

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot		atomic_andnot
#define atomic_fetch_andnot	atomic_fetch_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
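
/*
 * Illustrative sketch only (not from the original file) of what the
 * ATOMIC_OPS() instantiations above provide and how they behave:
 *
 *	void atomic_add(int i, atomic_t *v);		// v += i, no return
 *	int  atomic_add_return(int i, atomic_t *v);	// returns the new value
 *	int  atomic_fetch_add(int i, atomic_t *v);	// returns the old value
 *
 *	atomic_t cnt = ATOMIC_INIT(0);
 *	atomic_add(5, &cnt);				// counter == 5
 *	int old = atomic_fetch_add(3, &cnt);		// old == 5, counter == 8
 *	int new = atomic_add_return(2, &cnt);		// new == 10
 */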

#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(a)	{ (a) }

static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, s64 a)
{
	/*
	 * This could have been a simple assignment in "C", but it would need
	 * an explicit volatile. Otherwise gcc optimizers could elide the
	 * store, which broke the atomic64 self-test.
	 * In the inline asm version, the memory clobber is needed for the
	 * exact same reason, to tell gcc about the store.
	 *
	 * This however is not needed for the sibling atomic64_add() etc since
	 * both load and store are explicitly done in inline asm. As long as
	 * the API is used for each access, gcc has no way to optimize away
	 * any load/store.
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	s64 val;							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd  %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	s64 val;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd  %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
	return val;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)		\
{									\
	s64 val, orig;							\
									\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd  %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\
									\
	smp_mb();							\
	return orig;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot		atomic64_andnot
#define atomic64_fetch_andnot	atomic64_fetch_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)
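
/*
 * Illustrative sketch only (not from the original file): the add/sub pairs
 * above chain the two 32-bit halves of the counter; add.f sets the carry from
 * the low word and adc folds it into the high word, so a 64-bit increment
 * carries correctly across the word boundary:
 *
 *	atomic64_t big = ATOMIC64_INIT(0xffffffffLL);
 *	atomic64_add(1, &big);		// counter == 0x100000000LL
 */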

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64
atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
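
/*
 * Illustrative sketch only (not from the original file): typical "drop a
 * reference, free on last put" usage, with a hypothetical refcnt field and
 * release_foo() callee:
 *
 *	if (atomic64_dec_if_positive(&foo->refcnt) == 0)
 *		release_foo(foo);	// old value was 1, counter is now 0
 */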

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, if it was not @u.
 * Returns the old value of @v.
 */
static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 old, temp;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	brne    %L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d  %H0, %H4, 3f	# return since v == u \n"
	"2:				\n"
	"	add.f   %L1, %L0, %L3	\n"
	"	adc     %H1, %H0, %H3	\n"
	"	scondd  %1, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(old), "=&r" (temp)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
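
/*
 * Illustrative sketch only (not from the original file): the classic
 * "increment unless zero" pattern built on the primitive above (this is how
 * the generic atomic64_inc_not_zero() fallback behaves), with a hypothetical
 * obj->uses counter:
 *
 *	if (atomic64_fetch_add_unless(&obj->uses, 1, 0) != 0)
 *		;	// obj->uses was non-zero and has been incremented
 */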

#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_ARC_ATOMIC_H */