arch/s390/include/asm/spinlock.h
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>

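/*
 * Each CPU spins with its own lock value, kept in the per-CPU lowcore
 * area, so a held lock word also identifies the owning CPU.
 */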
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

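/*
 * Under a hypervisor the lock holder's virtual CPU may itself have been
 * preempted; lock waiters use this hook to yield instead of spinning
 * against an owner that cannot currently run.
 */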
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness guarantees; fairness has a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

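/*
 * Out-of-line slow paths (in this tree they live in
 * arch/s390/lib/spinlock.c); they are entered only when the inline
 * fast path below fails to take the lock.
 */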
void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

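/*
 * ~cpu is nonzero for every valid cpu number (including cpu 0), so a
 * stored lockval is always distinguishable from the unlocked state 0.
 */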
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

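/*
 * Unlock is a plain store of 0: s390 does not reorder a store with
 * earlier memory accesses, so no explicit barrier instruction is needed
 * on the release path; the "memory" clobber stops the compiler from
 * moving accesses out of the critical section.
 */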
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}
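
/*
 * A minimal usage sketch (illustrative only; kernel code should go
 * through the generic spin_lock()/spin_unlock() wrappers rather than
 * calling these arch helpers directly):
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);
 *	...critical section...
 *	arch_spin_unlock(&lock);
 *
 *	if (arch_spin_trylock(&lock)) {
 *		...critical section...
 *		arch_spin_unlock(&lock);
 *	}
 */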

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to take an
 * irq-safe write-lock, but readers can take non-irqsafe
 * read-locks.
 */
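
/*
 * Sketch of the mixing pattern described above (illustrative only;
 * real code uses the generic rwlock_t API):
 *
 *	reader (may run in interrupt context):
 *		read_lock(&lock);
 *		...read shared data...
 *		read_unlock(&lock);
 *
 *	writer (process context; must block the interrupt readers):
 *		write_lock_irqsave(&lock, flags);
 *		...modify shared data...
 *		write_unlock_irqrestore(&lock, flags);
 */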

/**
 * arch_read_can_lock - would arch_read_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * arch_write_can_lock - would arch_write_trylock() succeed?
 * @x: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

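/*
 * Lock word encoding, as used by the code below: 0 means unlocked, a
 * positive value is the reader count, and bit 31 (0x80000000) marks an
 * exclusive writer, making the word negative.  rw->owner additionally
 * caches the write owner's lockval for arch_read/write_relax().
 */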
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	int old = READ_ONCE(rw->lock);
	return likely(old >= 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	int old = READ_ONCE(rw->lock);
	return likely(old == 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

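/*
 * On z196 and newer the interlocked-access facility provides atomic
 * load-and-or/and/add instructions (lao/lan/laa) that return the old
 * value.  The "bcr 14,0" in __RAW_LOCK is the serialization (memory
 * barrier) needed on the acquire path; the unlock path relies on the
 * strong store ordering noted above and needs none.
 */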
#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if (old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

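/*
 * Without the interlocked-access instructions the reader count is
 * dropped with a compare-and-swap retry loop: reread the lock word
 * until the decrement lands without a concurrent update.
 */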
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int old;

	do {
		old = READ_ONCE(rw->lock);
	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */


static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */