/*
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#define ATOMIC_INIT(i)	{ (i) }

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory");	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");	\
	op##_relaxed(args);						\
})
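/*
 * Sketch of how these helpers are consumed (an assumption about the generic
 * atomic layer, not something this header defines): include/linux/atomic.h
 * derives the _acquire/_release form of an op from its _relaxed form,
 * roughly like
 *
 *	#define atomic_fetch_add_acquire(...)				\
 *		__atomic_op_acquire(atomic_fetch_add, __VA_ARGS__)
 *
 * so the relaxed AMO executes first and the trailing acquire fence keeps
 * later memory accesses from being reordered before it.
 */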
static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
static __always_inline long atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}
#endif
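/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	int snapshot_events(void)
 *	{
 *		return atomic_read(&nr_events);
 *	}
 *
 * atomic_read()/atomic_set() are plain READ_ONCE()/WRITE_ONCE() accesses;
 * they are not read-modify-write ops and imply no ordering on their own.
 */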
/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)		\
static __always_inline							\
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)		\
{									\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " zero, %1, %0"	\
		: "+A" (v->counter)					\
		: "r" (I)						\
		: "memory");						\
}
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_OP (op, asm_op, I, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_OP (op, asm_op, I, w,  int,   )				\
        ATOMIC_OP (op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
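/*
 * Illustrative sketch (an assumption about the generated code, not asserted
 * by this header): atomic_add(i, v) from the expansion above is a single
 * unordered AMO along the lines of
 *
 *	amoadd.w zero, a0, (a1)		# v->counter += i, old value discarded
 *
 * and atomic_sub(i, v) reuses the same amoadd with a negated operand (-i),
 * which is why no separate subtracting AMO is instantiated.
 */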
/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i,			\
					     atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}									\
static __always_inline							\
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type ".aqrl %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_##op##_return_relaxed(c_type i,		\
					      atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;	\
}									\
static __always_inline							\
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##op(i, v) c_op I;		\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   )		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_FETCH_OP( op, asm_op,       I, w,  int,   )		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, w,  int,   )		\
        ATOMIC_FETCH_OP( op, asm_op,       I, d, long, 64)		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
#endif
ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_add_return		atomic_add_return
#define atomic_sub_return		atomic_sub_return

#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
#define atomic_fetch_add		atomic_fetch_add
#define atomic_fetch_sub		atomic_fetch_sub
#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_add_return		atomic64_add_return
#define atomic64_sub_return		atomic64_sub_return

#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
#define atomic64_fetch_add		atomic64_fetch_add
#define atomic64_fetch_sub		atomic64_fetch_sub
#endif

#undef ATOMIC_OPS
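/*
 * Illustrative semantics sketch (hypothetical values, not from this header):
 *
 *	atomic_t v = ATOMIC_INIT(3);
 *	int old = atomic_fetch_add(2, &v);	// old == 3, v.counter == 5
 *	int new = atomic_add_return(2, &v);	// new == 7, v.counter == 7
 *
 * The _return form is simply the fetch form with "c_op I" (here "+ i")
 * applied to the value that was read, as ATOMIC_OP_RETURN above shows.
 */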
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
        ATOMIC_FETCH_OP(op, asm_op, I, w,  int,   )			\
        ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)
#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
#define atomic_fetch_and		atomic_fetch_and
#define atomic_fetch_or			atomic_fetch_or
#define atomic_fetch_xor		atomic_fetch_xor
#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
#define atomic64_fetch_and		atomic64_fetch_and
#define atomic64_fetch_or		atomic64_fetch_or
#define atomic64_fetch_xor		atomic64_fetch_xor
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/*
 * The extra atomic operations that are constructed from one of the core
 * AMO-based operations above (aside from sub, which is easier to fit above).
 * These are required to perform a full barrier, but they're OK this way
 * because atomic_*_return is also required to perform a full barrier.
 */
#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix)		\
static __always_inline							\
bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)		\
{									\
	return atomic##prefix##_##func_op##_return(i, v) comp_op I;	\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, func_op, comp_op, I)				\
        ATOMIC_OP(op, func_op, comp_op, I,  int,   )
#else
#define ATOMIC_OPS(op, func_op, comp_op, I)				\
        ATOMIC_OP(op, func_op, comp_op, I,  int,   )			\
        ATOMIC_OP(op, func_op, comp_op, I, long, 64)
#endif

ATOMIC_OPS(add_and_test, add, ==, 0)
ATOMIC_OPS(sub_and_test, sub, ==, 0)
ATOMIC_OPS(add_negative, add,  <, 0)

#undef ATOMIC_OP
#undef ATOMIC_OPS
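/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 * a hand-rolled reference count where the final dropper frees the object:
 *
 *	if (atomic_sub_and_test(1, &obj->refcnt))
 *		kfree(obj);
 *
 * The full barrier inherited from atomic_sub_return() is what makes it safe
 * to tear the object down once the count is observed to hit zero.
 */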
#define ATOMIC_OP(op, func_op, I, c_type, prefix)			\
static __always_inline							\
void atomic##prefix##_##op(atomic##prefix##_t *v)			\
{									\
	atomic##prefix##_##func_op(I, v);				\
}

#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix)			\
static __always_inline							\
c_type atomic##prefix##_fetch_##op##_relaxed(atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##func_op##_relaxed(I, v);	\
}									\
static __always_inline							\
c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v)		\
{									\
	return atomic##prefix##_fetch_##func_op(I, v);			\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix)		\
static __always_inline							\
c_type atomic##prefix##_##op##_return_relaxed(atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##op##_relaxed(v) c_op I;	\
}									\
static __always_inline							\
c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v)		\
{									\
	return atomic##prefix##_fetch_##op(v) c_op I;			\
}
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_OP(       op, asm_op,       I,  int,   )		\
        ATOMIC_FETCH_OP( op, asm_op,       I,  int,   )		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
        ATOMIC_OP(       op, asm_op,       I,  int,   )		\
        ATOMIC_FETCH_OP( op, asm_op,       I,  int,   )		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I,  int,   )		\
        ATOMIC_OP(       op, asm_op,       I, long, 64)		\
        ATOMIC_FETCH_OP( op, asm_op,       I, long, 64)		\
        ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
#endif

ATOMIC_OPS(inc, add, +,  1)
ATOMIC_OPS(dec, add, +, -1)
#define atomic_inc_return_relaxed	atomic_inc_return_relaxed
#define atomic_dec_return_relaxed	atomic_dec_return_relaxed
#define atomic_inc_return		atomic_inc_return
#define atomic_dec_return		atomic_dec_return

#define atomic_fetch_inc_relaxed	atomic_fetch_inc_relaxed
#define atomic_fetch_dec_relaxed	atomic_fetch_dec_relaxed
#define atomic_fetch_inc		atomic_fetch_inc
#define atomic_fetch_dec		atomic_fetch_dec
#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_inc_return_relaxed	atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed	atomic64_dec_return_relaxed
#define atomic64_inc_return		atomic64_inc_return
#define atomic64_dec_return		atomic64_dec_return

#define atomic64_fetch_inc_relaxed	atomic64_fetch_inc_relaxed
#define atomic64_fetch_dec_relaxed	atomic64_fetch_dec_relaxed
#define atomic64_fetch_inc		atomic64_fetch_inc
#define atomic64_fetch_dec		atomic64_fetch_dec
#endif

#undef ATOMIC_OPS
#undef ATOMIC_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#define ATOMIC_OP(op, func_op, comp_op, I, prefix)			\
static __always_inline							\
bool atomic##prefix##_##op(atomic##prefix##_t *v)			\
{									\
	return atomic##prefix##_##func_op##_return(v) comp_op I;	\
}

ATOMIC_OP(inc_and_test, inc, ==, 0,   )
ATOMIC_OP(dec_and_test, dec, ==, 0,   )
#ifndef CONFIG_GENERIC_ATOMIC64
ATOMIC_OP(inc_and_test, inc, ==, 0, 64)
ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
#endif

#undef ATOMIC_OP
/* This is required to provide a full barrier on success. */
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
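/*
 * Illustrative semantics sketch (hypothetical values, not from this header):
 *
 *	atomic_t v = ATOMIC_INIT(4);
 *	__atomic_add_unless(&v, 1, 4);	// returns 4, counter unchanged
 *	__atomic_add_unless(&v, 1, 0);	// returns 4, counter becomes 5
 *
 * The old value is returned either way; the generic atomic_add_unless()
 * wrapper compares it against u to report whether the add happened.
 */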
#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}

static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	return __atomic64_add_unless(v, a, u) != u;
}
#endif
/*
 * The extra atomic operations that are constructed from one of the core
 * LR/SC-based operations above.
 */
static __always_inline int atomic_inc_not_zero(atomic_t *v)
{
	return __atomic_add_unless(v, 1, 0);
}

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
{
	return atomic64_add_unless(v, 1, 0);
}
#endif
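/*
 * Illustrative usage sketch (hypothetical lookup code, not from this header):
 * take a reference only while the object is still live:
 *
 *	obj = idr_find(&ctx->idr, id);
 *	if (obj && !atomic_inc_not_zero(&obj->refcnt))
 *		obj = NULL;	// lost the race with the final put
 *
 * A zero count means teardown has already begun, so the increment is refused
 * and the caller must not touch the object.
 */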
/*
 * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
 * {cmp,}xchg and the operations that return, so they need a full barrier.
 */
#define ATOMIC_OP(c_t, prefix, size)					\
static __always_inline							\
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_relaxed(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_acquire(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_release(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)			\
{									\
	return __xchg(&(v->counter), n, size);				\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_release(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n)	\
{									\
	return __cmpxchg(&(v->counter), o, n, size);			\
}
#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS()							\
	ATOMIC_OP( int,   , 4)
#else
#define ATOMIC_OPS()							\
	ATOMIC_OP( int,   , 4)						\
	ATOMIC_OP(long, 64, 8)
#endif

ATOMIC_OPS()

#undef ATOMIC_OPS
#undef ATOMIC_OP
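/*
 * Illustrative usage sketch (hypothetical helper, not part of this header):
 * a classic cmpxchg loop that increments a counter but clamps it at a cap:
 *
 *	static bool inc_below(atomic_t *v, int cap)
 *	{
 *		int old, seen = atomic_read(v);
 *
 *		do {
 *			old = seen;
 *			if (old >= cap)
 *				return false;
 *			seen = atomic_cmpxchg(v, old, old + 1);
 *		} while (seen != old);
 *
 *		return true;
 *	}
 *
 * atomic_cmpxchg() returns the value actually found in the counter, so the
 * loop retries until the stored value matched the expected one.
 */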
static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)
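/*
 * Illustrative semantics sketch (hypothetical values, not from this header):
 *
 *	atomic_t avail = ATOMIC_INIT(1);
 *	atomic_dec_if_positive(&avail);	// returns 0, counter becomes 0
 *	atomic_dec_if_positive(&avail);	// returns -1, counter stays 0
 *
 * A negative return value means the decrement was refused because the
 * result would have dropped below zero.
 */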
#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(v, 1)
#endif
#endif /* _ASM_RISCV_ATOMIC_H */