/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
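
/*
 * The spinlock word holds the SPINLOCK_LOCKVAL of the owning CPU
 * (~cpu, see arch_spin_lockval() below) while the lock is taken and
 * zero while it is free.
 */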

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;
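
/*
 * vcpu_is_preempted() is used by generic locking code (e.g. mutex and
 * rwsem owner spinning) to stop spinning on a lock whose holder's
 * virtual CPU is currently not running.
 */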

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
        arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
        return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        barrier();
        return likely(arch_spin_value_unlocked(*lp) &&
                      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}
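
/*
 * arch_spin_lock() first tries the inline compare-and-swap fast path
 * above; if the lock is contended it falls back to the out-of-line
 * slow path in arch/s390/lib/spinlock.c.
 */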

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
                                        unsigned long flags)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}
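
/*
 * arch_spin_unlock() releases the lock with a plain store of zero.  On
 * zEC12 and newer machines (CONFIG_HAVE_MARCH_ZEC12_FEATURES) the store
 * is preceded by a "NIAI 7" next-instruction-access-intent hint, emitted
 * as a .long because older assemblers do not know the mnemonic; it is
 * meant to speed up hand-over of the lock cache line to a waiting CPU.
 */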

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        typecheck(int, lp->lock);
        asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
                "	.long	0xb2fa0070\n"	/* NIAI 7 */
#endif
                "	st	%1,%0\n"
                : "=Q" (lp->lock) : "d" (0) : "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        while (arch_spin_is_locked(lock))
                arch_spin_relax(lock);
        smp_acquire__after_ctrl_dep();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
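
/*
 * Illustrative pattern (using the generic rwlock API that wraps the
 * arch_* primitives below): readers that may also run in interrupt
 * context can take the plain lock, while every writer must use an
 * irq-disabling variant:
 *
 *	read_lock(&lock);		(safe in irq and process context)
 *	...
 *	read_unlock(&lock);
 *
 *	write_lock_irq(&lock);		(writer keeps irq readers out)
 *	...
 *	write_unlock_irq(&lock);
 */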

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
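
/*
 * rwlock word layout: the sign bit (0x80000000) is set while a writer
 * holds the lock, the remaining bits count active readers.  A zero word
 * means unlocked, a positive word means read-locked, a negative word
 * means write-locked.
 */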

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
        int old = ACCESS_ONCE(rw->lock);
        return likely(old >= 0 &&
                      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
        int old = ACCESS_ONCE(rw->lock);
        return likely(old == 0 &&
                      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}
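
/*
 * On z196 and newer machines the interlocked-access facility provides
 * LOAD AND OR/AND/ADD (lao/lan/laa), which atomically update the lock
 * word and return its old value in a single instruction, so the fast
 * paths below need no compare-and-swap retry loop.
 */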

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n" /* fast-BCR serialization */ \
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val) : "cc", "memory");	\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val) : "cc", "memory");	\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        int old;

        old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
        if (old < 0)
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        __RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        int old;

        old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
        if (old != 0)
                _raw_write_lock_wait(rw, old);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        rw->owner = 0;
        __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                _raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        int old;

        do {
                old = ACCESS_ONCE(rw->lock);
        } while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw))
                _raw_write_lock_wait(rw);
        rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        typecheck(int, rw->lock);

        rw->owner = 0;
        asm volatile(
                "	st	%1,%0\n"
                : "+Q" (rw->lock) : "d" (0) : "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                return _raw_read_trylock_retry(rw);
        return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
                return 0;
        rw->owner = SPINLOCK_LOCKVAL;
        return 1;
}
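
/*
 * rw->owner caches the lock value of the current writer.  The relax
 * helpers hand it to arch_lock_relax(), which may ask the hypervisor to
 * run the owning CPU when this (virtual) CPU is busy waiting for a lock
 * whose holder has been preempted.
 */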

static inline void arch_read_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
        arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */