arch/s390/include/asm/spinlock.h
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted
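
/*
 * Illustrative sketch (not part of this header): lock waiters can poll
 * vcpu_is_preempted() to avoid spinning on behalf of a holder whose
 * virtual cpu is not currently running, e.g.:
 *
 *	while (arch_spin_is_locked(lp)) {
 *		if (vcpu_is_preempted(owner_cpu))
 *			arch_spin_relax(lp);	// hand back to the hypervisor
 *	}
 *
 * "owner_cpu" is a hypothetical variable used only for illustration;
 * the real wait loops live in arch/s390/lib/spinlock.c.
 */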

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness has a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
        arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
        return ~cpu;
}
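
/*
 * The per-cpu lock value is the bitwise complement of the cpu number,
 * so every cpu (including cpu 0) maps to a non-zero value and 0 can
 * serve as the "unlocked" state.  For example:
 *
 *	arch_spin_lockval(0) == 0xffffffff
 *	arch_spin_lockval(5) == 0xfffffffa
 */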

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        barrier();
        return likely(arch_spin_value_unlocked(*lp) &&
                      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}
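
/*
 * Sketch of the fast path in portable C, assuming C11 atomics in place
 * of the s390 compare-and-swap wrapper __atomic_cmpxchg_bool:
 *
 *	int expected = 0;
 *	bool locked = atomic_compare_exchange_strong(&lp->lock, &expected,
 *						     SPINLOCK_LOCKVAL);
 *
 * The lock is taken iff the word still reads 0; on success it holds the
 * owner's lockval, which the slow path and arch_spin_relax() use to
 * identify (and possibly yield to) the holding cpu.
 */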

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
                                        unsigned long flags)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        typecheck(int, lp->lock);
        asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
                "       .long   0xb2fa0070\n"   /* NIAI 7 */
#endif
                "       st      %1,%0\n"
                : "=Q" (lp->lock) : "d" (0) : "cc", "memory");
}
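
/*
 * Note on the raw opcode above: 0xb2fa0070 encodes NIAI
 * ("next-instruction access intent") with hint code 7; it is emitted
 * via .long because older assemblers do not know the mnemonic.  The
 * release itself is a plain store of 0: s390's strong memory ordering
 * makes a normal store sufficient here, and the "memory" clobber stops
 * the compiler from sinking critical-section accesses below the unlock.
 */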

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        while (arch_spin_is_locked(lock))
                arch_spin_relax(lock);
        smp_acquire__after_ctrl_dep();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
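
/*
 * The rwlock word encodes both roles: bit 31 (0x80000000) is the writer
 * bit, the lower bits count readers.  Interpreted as a signed int:
 *
 *	0		unlocked
 *	n > 0		n readers, no writer
 *	< 0		writer holds the lock, readers must wait
 *
 * Hence the tests below: reads may proceed while the value is >= 0,
 * writes only when it is exactly 0.
 */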

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
        int old = READ_ONCE(rw->lock);
        return likely(old >= 0 &&
                      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
        int old = READ_ONCE(rw->lock);
        return likely(old == 0 &&
                      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR     "lao"
#define __RAW_OP_AND    "lan"
#define __RAW_OP_ADD    "laa"

#define __RAW_LOCK(ptr, op_val, op_string)              \
({                                                      \
        int old_val;                                    \
                                                        \
        typecheck(int *, ptr);                          \
        asm volatile(                                   \
                op_string "     %0,%2,%1\n"             \
                "bcr    14,0\n"                         \
                : "=d" (old_val), "+Q" (*ptr)           \
                : "d" (op_val)                          \
                : "cc", "memory");                      \
        old_val;                                        \
})

#define __RAW_UNLOCK(ptr, op_val, op_string)            \
({                                                      \
        int old_val;                                    \
                                                        \
        typecheck(int *, ptr);                          \
        asm volatile(                                   \
                op_string "     %0,%2,%1\n"             \
                : "=d" (old_val), "+Q" (*ptr)           \
                : "d" (op_val)                          \
                : "cc", "memory");                      \
        old_val;                                        \
})
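
/*
 * On z196 and newer machines the lock word is updated with the
 * interlocked-access facility: laa/lao/lan atomically add/or/and the
 * operand into memory and return the old value in a single instruction,
 * so no compare-and-swap loop is needed.  The "bcr 14,0" after the lock
 * operation is a serializing no-op branch that acts as a memory
 * barrier; the unlock side omits it.  A read lock, for example, expands
 * roughly to:
 *
 *	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
 *	// laa old,1,rw->lock ; bcr 14,0
 */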

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        int old;

        old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
        if (old < 0)
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        int old;

        old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
        if (old != 0)
                _raw_write_lock_wait(rw, old);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        rw->owner = 0;
        __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        int old;

        do {
                old = READ_ONCE(rw->lock);
        } while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}
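
/*
 * Without the interlocked-access facility the reader count is dropped
 * with a compare-and-swap retry loop: reread the word, attempt to swap
 * in (old - 1), and retry if another cpu changed the word in between.
 * This is the classic lock-free update pattern; the z196 path above
 * does the same decrement with a single "laa" and an operand of -1.
 */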

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw))
                _raw_write_lock_wait(rw);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        typecheck(int, rw->lock);

        rw->owner = 0;
        asm volatile(
                "st     %1,%0\n"
                : "+Q" (rw->lock)
                : "d" (0)
                : "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                return _raw_read_trylock_retry(rw);
        return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
                return 0;
        rw->owner = SPINLOCK_LOCKVAL;
        return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}
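
/*
 * rw->owner caches the lockval of the cpu that holds (or last held) the
 * write lock.  The relax helpers pass it to arch_lock_relax(), defined
 * in arch/s390/lib/spinlock.c, which can then direct a yield hint at
 * that cpu when it holds the lock on a preempted virtual cpu.  While
 * the lock is only read-held there is no single owner, so the owner
 * field is 0 and the hint degenerates to a no-op.
 */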

#endif /* __ASM_SPINLOCK_H */