/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *            Denis Joseph Barrow,
 *            Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

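/*
 * atomic_read() and atomic_set() compile to a single load/store ("l"/"st");
 * the inline asm forces a real memory access instead of letting the
 * compiler reuse a cached value.
 */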
static inline int atomic_read(const atomic_t *v)
{
        int c;

        asm volatile(
                "       l       %0,%1\n"
                : "=d" (c) : "Q" (v->counter));
        return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
        asm volatile(
                "       st      %1,%0\n"
                : "=Q" (v->counter) : "d" (i));
}

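/*
 * atomic_add_return() yields the counter value after the addition,
 * atomic_fetch_add() the value before it; both use the serialized
 * ("_barrier") add from <asm/atomic_ops.h>, so they are fully ordered.
 */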
static inline int atomic_add_return(int i, atomic_t *v)
{
        return __atomic_add_barrier(i, &v->counter) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
        return __atomic_add_barrier(i, &v->counter);
}

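/*
 * atomic_add() returns nothing. On z196 and newer machines, adding a
 * compile-time constant that fits a signed byte can use the add-immediate
 * instruction ("asi") instead of the generic read-modify-write path.
 */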
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
                __atomic_add_const(i, &v->counter);
                return;
        }
#endif
        __atomic_add(i, &v->counter);
}

#define atomic_add_negative(_i, _v)     (atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)                  atomic_add(1, _v)
#define atomic_inc_return(_v)           atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)         (atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)              atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)       atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)        atomic_fetch_add(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)     (atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)                  atomic_sub(1, _v)
#define atomic_dec_return(_v)           atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)         (atomic_sub_return(1, _v) == 0)

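/*
 * Usage sketch for the helpers above (hypothetical caller): a simple
 * reference count might do
 *
 *	atomic_inc(&obj->refcnt);
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 *
 * where obj->refcnt is an atomic_t and free_obj() is whatever teardown
 * the caller provides.
 */

/*
 * ATOMIC_OPS() instantiates the bitwise helpers: atomic_and/or/xor()
 * apply the operation without returning a value, while
 * atomic_fetch_and/or/xor() return the old counter value and use the
 * serialized ("_barrier") variants from <asm/atomic_ops.h>.
 */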
#define ATOMIC_OPS(op)                                                  \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        __atomic_##op(i, &v->counter);                                  \
}                                                                       \
static inline int atomic_fetch_##op(int i, atomic_t *v)                 \
{                                                                       \
        return __atomic_##op##_barrier(i, &v->counter);                 \
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

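/*
 * atomic_xchg() unconditionally replaces the counter and returns the old
 * value; atomic_cmpxchg() replaces it only if it still equals @old and
 * likewise returns the value found, so callers compare the result with
 * @old to see whether the exchange happened.
 */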
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        return __atomic_cmpxchg(&v->counter, old, new);
}

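/*
 * Add @a to @v unless @v currently holds @u; returns the value @v had
 * beforehand, so the caller can tell whether the add took place by
 * comparing the result against @u.  Implemented as a cmpxchg retry loop.
 */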
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic_cmpxchg(v, c, c + a);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}

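/*
 * The atomic64_t operations below mirror the 32-bit ones, using the
 * 64-bit instruction forms ("lg"/"stg" for read/set, "agsi" for the
 * constant-add fast path) via the 64-bit helpers in <asm/atomic_ops.h>.
 */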
#define ATOMIC64_INIT(i)  { (i) }

static inline long atomic64_read(const atomic64_t *v)
{
        long c;

        asm volatile(
                "       lg      %0,%1\n"
                : "=d" (c) : "Q" (v->counter));
        return c;
}

static inline void atomic64_set(atomic64_t *v, long i)
{
        asm volatile(
                "       stg     %1,%0\n"
                : "=Q" (v->counter) : "d" (i));
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
        return __atomic64_add_barrier(i, &v->counter) + i;
}

static inline long atomic64_fetch_add(long i, atomic64_t *v)
{
        return __atomic64_add_barrier(i, &v->counter);
}

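/*
 * As with atomic_add(), constant increments in the signed-byte range can
 * take the z196 add-immediate path ("agsi") instead of the generic one.
 */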
static inline void atomic64_add(long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
                __atomic64_add_const(i, &v->counter);
                return;
        }
#endif
        __atomic64_add(i, &v->counter);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
        return __atomic64_cmpxchg(&v->counter, old, new);
}

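/*
 * ATOMIC64_OPS() follows the same pattern as ATOMIC_OPS() above:
 * atomic64_and/or/xor() apply the operation, atomic64_fetch_and/or/xor()
 * additionally return the previous counter value.
 */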
#define ATOMIC64_OPS(op)                                                \
static inline void atomic64_##op(long i, atomic64_t *v)                 \
{                                                                       \
        __atomic64_##op(i, &v->counter);                                \
}                                                                       \
static inline long atomic64_fetch_##op(long i, atomic64_t *v)           \
{                                                                       \
        return __atomic64_##op##_barrier(i, &v->counter);               \
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

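/*
 * Add @i to @v unless @v currently holds @u.  Unlike __atomic_add_unless()
 * above, this returns a boolean: non-zero if the add was performed, zero
 * if the counter already held @u.
 */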
static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
{
        long c, old;

        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic64_cmpxchg(v, c, c + i);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != u;
}

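/*
 * Decrement @v, but only if the result stays non-negative.  Returns the
 * decremented value; a negative return means the counter was already
 * zero or below and was left untouched.
 */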
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
        long c, old, dec;

        c = atomic64_read(v);
        for (;;) {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
                old = atomic64_cmpxchg(v, c, dec);
                if (likely(old == c))
                        break;
                c = old;
        }
        return dec;
}

#define atomic64_add_negative(_i, _v)   (atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)                atomic64_add(1, _v)
#define atomic64_inc_return(_v)         atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)       (atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)     atomic64_add_return(-(long)(_i), _v)
#define atomic64_fetch_sub(_i, _v)      atomic64_fetch_add(-(long)(_i), _v)
#define atomic64_sub(_i, _v)            atomic64_add(-(long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)   (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)                atomic64_sub(1, _v)
#define atomic64_dec_return(_v)         atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)       (atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1, 0)
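
/*
 * Usage sketch for atomic64_inc_not_zero() (hypothetical caller): take a
 * reference only while the object is still live, e.g.
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;	/- already torn down, do not touch obj -/
 *
 * where obj->refcnt is an atomic64_t owned by the caller and the trailing
 * note is an inline remark, not code.
 */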

#endif /* __ARCH_S390_ATOMIC__ */