// SPDX-License-Identifier: GPL-2.0
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

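/*
 * Number of busy-wait iterations before a spinning CPU considers
 * yielding to the lock holder.  The default of -1 is replaced by 1000
 * in the early initcall below unless "spin_retry=" overrides it on the
 * kernel command line.
 */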
int spin_retry = -1;

static int __init spin_retry_init(void)
{
        if (spin_retry < 0)
                spin_retry = 1000;
        return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);

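/*
 * Load the lock word, preceded by a NIAI 4 ("next instruction access
 * intent") hint on machines that have it.  The instruction is emitted
 * as a raw .long, presumably so that older assemblers still accept it.
 * The hint marks the following load as read-only, which is meant to
 * reduce exclusive cache-line stealing among the spinning CPUs.
 */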
static inline int arch_load_niai4(int *lock)
{
        int owner;

        asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
                "       .long   0xb2fa0040\n"   /* NIAI 4 */
#endif
                "       l       %0,%1\n"
                : "=d" (owner) : "Q" (*lock) : "memory");
        return owner;
}

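/*
 * Compare-and-swap on the lock word, preceded by a NIAI 8 hint that
 * the next access intends to store.  Returns nonzero if the lock word
 * still contained old and was replaced by new.
 */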
static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
        int expected = old;

        asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
                "       .long   0xb2fa0080\n"   /* NIAI 8 */
#endif
                "       cs      %0,%3,%1\n"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
                : "cc", "memory");
        return expected == old;
}

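/*
 * Slow path of arch_spin_lock().  The lock word holds the owner's
 * SPINLOCK_LOCKVAL, which on s390 is the bitwise complement of the
 * owning CPU number, so ~owner below recovers the CPU to yield to.
 */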
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        /* Pass the virtual CPU to the lock holder if it is not running */
        owner = arch_load_niai4(&lp->lock);
        if (owner && arch_vcpu_is_preempted(~owner))
                smp_yield_cpu(~owner);

        count = spin_retry;
        while (1) {
                owner = arch_load_niai4(&lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
                                return;
                        continue;
                }
                if (count-- >= 0)
                        continue;
                count = spin_retry;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait);

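/*
 * Same as arch_spin_lock_wait(), but interrupts are re-enabled (to the
 * state saved in flags) while busy waiting and disabled again around
 * the actual acquisition attempt.
 */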
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        local_irq_restore(flags);

        /* Pass the virtual CPU to the lock holder if it is not running */
        owner = arch_load_niai4(&lp->lock);
        if (owner && arch_vcpu_is_preempted(~owner))
                smp_yield_cpu(~owner);

        count = spin_retry;
        while (1) {
                owner = arch_load_niai4(&lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        local_irq_disable();
                        if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
                        continue;
                }
                if (count-- >= 0)
                        continue;
                count = spin_retry;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

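/* Bounded trylock: retry up to spin_retry times without yielding. */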
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        for (count = spin_retry; count > 0; count--) {
                owner = READ_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
                                return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

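/*
 * Reader slow path.  The rwlock word counts readers in the lower 31
 * bits; a negative value means a writer owns or is acquiring the lock.
 * With the z196 interlocked-access operations the fast path has
 * presumably already added a reader count speculatively, which is
 * taken back here before waiting for the writer to go away.
 */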
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait);

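/* Bounded attempt to take a reader count without ever yielding. */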
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int old;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

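/*
 * Writer slow path for machines with the interlocked-access facility.
 * Bit 31 (0x80000000) of the lock word is the writer bit, the lower 31
 * bits count readers.  prev is presumably the lock value seen by the
 * fast path's attempt to set the writer bit; the loop keeps (re)setting
 * the bit and is done once it was set while no other writer held it
 * (prev >= 0) and all readers have drained.
 */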
void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
        int count = spin_retry;
        int owner, old;

        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                smp_mb();
                if (old >= 0) {
                        prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
                        old = prev;
                }
                if ((old & 0x7fffffff) == 0 && prev >= 0)
                        break;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

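/*
 * Writer slow path for machines without the interlocked-access
 * operations: the writer bit is set with a compare-and-swap loop
 * instead.
 */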
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int owner, old, prev;

        prev = 0x80000000;
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if (old >= 0 &&
                    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
                        prev = old;
                else
                        smp_mb();
                if ((old & 0x7fffffff) == 0 && prev >= 0)
                        break;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

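/* Bounded attempt to move the lock word from 0 to the writer bit. */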
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int old;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

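/*
 * Directed yield helper, presumably used by the arch_*_relax() hooks:
 * cpu is the lock value (again the complement of the holder's CPU
 * number); do nothing if the lock is free or, on LPAR, if the holder
 * is currently running.
 */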
void arch_lock_relax(int cpu)
{
        if (!cpu)
                return;
        if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
                return;
        smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);