/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *            Denis Joseph Barrow,
 *            Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

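/*
 * A plain load is sufficient here: aligned 32-bit accesses are atomic
 * on s390, so no special instruction is needed to read the counter.
 */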
static inline int atomic_read(const atomic_t *v)
{
        int c;

        asm volatile(
                "       l       %0,%1\n"
                : "=d" (c) : "Q" (v->counter));
        return c;
}

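/* Likewise, an aligned 32-bit store is atomic on s390. */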
static inline void atomic_set(atomic_t *v, int i)
{
        asm volatile(
                "       st      %1,%0\n"
                : "=Q" (v->counter) : "d" (i));
}

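/*
 * __atomic_add_barrier() is a fetch-and-add with full barrier semantics
 * that returns the old value of the counter: atomic_add_return()
 * derives the new value by adding i once more, while atomic_fetch_add()
 * hands the old value back directly.
 */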
static inline int atomic_add_return(int i, atomic_t *v)
{
        return __atomic_add_barrier(i, &v->counter) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
        return __atomic_add_barrier(i, &v->counter);
}

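/*
 * For compile-time constants that fit a signed byte (-128..127),
 * machines with the z196 interlocked-access facility can use an
 * add-immediate instruction that avoids fetching the old value
 * (see __atomic_add_const() in asm/atomic_ops.h).
 */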
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
                __atomic_add_const(i, &v->counter);
                return;
        }
#endif
        __atomic_add(i, &v->counter);
}

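/* Subtraction is simply addition of the negated value. */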
#define atomic_sub(_i, _v)              atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)       atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)        atomic_fetch_add(-(int)(_i), _v)

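/*
 * Generate atomic_{and,or,xor}() and the corresponding fetch variants.
 * The void forms use the plain helpers, the fetch forms use the
 * barrier variants and return the old value.
 */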
#define ATOMIC_OPS(op)                                                  \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        __atomic_##op(i, &v->counter);                                  \
}                                                                       \
static inline int atomic_fetch_##op(int i, atomic_t *v)                 \
{                                                                       \
        return __atomic_##op##_barrier(i, &v->counter);                 \
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

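/*
 * Exchange and compare-and-swap; both are built on the compare-and-swap
 * (cs) instruction, and atomic_cmpxchg() returns the value the counter
 * had before the operation.
 */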
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        return __atomic_cmpxchg(&v->counter, old, new);
}
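
/*
 * A minimal usage sketch of the 32-bit interface above:
 *
 *      static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *      atomic_add(2, &refcnt);                  refcnt is now 3
 *      old = atomic_fetch_sub(1, &refcnt);      old == 3, refcnt == 2
 *      if (atomic_cmpxchg(&refcnt, 2, 0) == 2)
 *              ...                              this caller saw 2 and
 *                                               zeroed the counter
 */
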
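/*
 * 64-bit counterparts of the operations above, using the 64-bit
 * load/store instructions (lg/stg) and the __atomic64_* helpers.
 */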
#define ATOMIC64_INIT(i)  { (i) }

static inline long atomic64_read(const atomic64_t *v)
{
        long c;

        asm volatile(
                "       lg      %0,%1\n"
                : "=d" (c) : "Q" (v->counter));
        return c;
}

static inline void atomic64_set(atomic64_t *v, long i)
{
        asm volatile(
                "       stg     %1,%0\n"
                : "=Q" (v->counter) : "d" (i));
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
        return __atomic64_add_barrier(i, &v->counter) + i;
}

static inline long atomic64_fetch_add(long i, atomic64_t *v)
{
        return __atomic64_add_barrier(i, &v->counter);
}

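/* Same signed-byte add-immediate optimization as atomic_add() above. */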
static inline void atomic64_add(long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
                __atomic64_add_const(i, &v->counter);
                return;
        }
#endif
        __atomic64_add(i, &v->counter);
}

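/*
 * 64-bit exchange and compare-and-swap, built on the 64-bit
 * compare-and-swap (csg) instruction.
 */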
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
        return __atomic64_cmpxchg(&v->counter, old, new);
}

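/* 64-bit and/or/xor, generated just like ATOMIC_OPS above. */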
#define ATOMIC64_OPS(op)                                                \
static inline void atomic64_##op(long i, atomic64_t *v)                 \
{                                                                       \
        __atomic64_##op(i, &v->counter);                                \
}                                                                       \
static inline long atomic64_fetch_##op(long i, atomic64_t *v)           \
{                                                                       \
        return __atomic64_##op##_barrier(i, &v->counter);               \
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

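/* As in the 32-bit case, subtraction is addition of the negated value. */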
#define atomic64_sub_return(_i, _v)     atomic64_add_return(-(long)(_i), _v)
#define atomic64_fetch_sub(_i, _v)      atomic64_fetch_add(-(long)(_i), _v)
#define atomic64_sub(_i, _v)            atomic64_add(-(long)(_i), _v)

#endif /* __ARCH_S390_ATOMIC__  */