/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }
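
/*
 * These arch_atomic64_*() helpers are the raw x86-64 implementations;
 * generic code normally reaches them through the atomic64_*() wrappers
 * pulled in via <linux/atomic.h>.
 */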
static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return __READ_ONCE((v)->counter);
}

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__WRITE_ONCE(v->counter, i);
}
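
/*
 * The "er" constraint lets @i be passed either in a register or as a
 * 32-bit sign-extended immediate encoded directly in the LOCK-prefixed
 * instruction.
 */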
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}

static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter) : "memory");
}
static __always_inline bool arch_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, e, "er", i);
}
#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test

static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_inc arch_atomic64_inc

static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter) : "memory");
}
#define arch_atomic64_dec arch_atomic64_dec

static __always_inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, e);
}
#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test

static __always_inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, e);
}
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test

static __always_inline bool arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, s, "er", i);
}
#define arch_atomic64_add_negative arch_atomic64_add_negative
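
/*
 * xadd() exchanges the addend with the previous value of the counter,
 * so the fetch_*() variants return the old value while add_return()
 * returns the new one (old value + @i).
 */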
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic64_add_return arch_atomic64_add_return

static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return(-i, v);
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

static __always_inline s64 arch_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}
#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
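
/*
 * arch_try_cmpxchg() performs the compare-and-exchange and returns
 * true on success; on failure it updates *@old with the value found
 * in @v->counter.
 */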
static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg

static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg
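
/*
 * The plain bitwise ops use LOCK-prefixed and/or/xor directly; the
 * fetch_*() variants loop with try_cmpxchg() because x86 has no
 * instruction that both performs the bitwise op and returns the old
 * value.
 */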
static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
			: "+m" (v->counter)
			: "er" (i)
			: "memory");
}

static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read(v);

	do {
	} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

#endif /* _ASM_X86_ATOMIC64_64_H */