/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
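
/*
 * A taken arch_spinlock_t stores the owner's per-cpu lock value (see
 * arch_spin_lockval() below), which each cpu caches in its lowcore and
 * reads via SPINLOCK_LOCKVAL. Storing the owner rather than a plain 0/1
 * lets the out-of-line wait path identify the owning cpu and yield to it.
 */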
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
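
/*
 * Number of inline retries the wait loops perform before yielding the
 * cpu; tunable on the kernel command line via "spin_retry=".
 */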
extern int spin_retry;
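
/*
 * Under a hypervisor a (virtual) cpu may be preempted, in which case
 * spinning against the lock holder is pointless. The SMP implementation
 * checks whether the cpu in question is currently running, so that
 * waiters can yield to it instead.
 */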
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
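
/* Out-of-line slow paths, implemented in arch/s390/lib/spinlock.c. */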
void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}
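
/*
 * Fast path: if the lock reads free, grab it with a single
 * compare-and-swap of 0 -> SPINLOCK_LOCKVAL. barrier() only orders the
 * compiler; the cpu-level ordering comes from the interlocked update
 * itself.
 */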
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}
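
/*
 * Unlock is a plain store of zero. On zEC12 and newer machines it is
 * prefixed with NIAI 7 ("next instruction access intent", emitted as
 * .long because older assemblers lack the mnemonic), which hints at the
 * upcoming access to the lock cache line, presumably to speed up handing
 * the line over to the next waiter.
 */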
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0070\n"	/* NIAI 7 */
#endif
		"	st	%1,%0\n"
		: "=Q" (lp->lock) : "d" (0) : "cc", "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
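
/*
 * Lock word layout: bit 31 (0x80000000) is the writer bit, the lower
 * bits count the readers. A negative value thus means a writer holds
 * the lock, zero means unlocked, and a positive value is the number of
 * readers - exactly what the two can_lock tests below check.
 */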

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
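
/* The irq-flags variants map to the plain lock ops: the flags are ignored. */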
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);

	return likely(old >= 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);

	return likely(old == 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}
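
/*
 * Two implementations of the rwlock slow paths follow: machines with
 * the interlocked-access facility (z196 and newer) update the lock word
 * with the load-and-or/and/add instructions, older machines fall back
 * to compare-and-swap loops.
 */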

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if (old < 0)
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}
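
/*
 * Without an interlocked add, dropping the reader count takes a
 * compare-and-swap retry loop.
 */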
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}
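
/*
 * rw->owner records the lock value of the current writer so the relax
 * helpers can yield to that cpu. Readers never set owner, so relaxing
 * only helps while a writer holds the lock.
 */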

static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */