/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>

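/*
 * A held lock stores the owner's per-cpu lock value (derived from the
 * cpu number, see arch_spin_lockval() below); 0 always means "unlocked".
 */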
#define SPINLOCK_LOCKVAL	(S390_lowcore.spinlock_lockval)

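/* Retry count used by the out-of-line slow paths (arch/s390/lib/spinlock.c). */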
extern int spin_retry;

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations. There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

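/* Slow paths and helpers, implemented in arch/s390/lib/spinlock.c. */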
void arch_lock_relax(int cpu);

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

static inline void arch_spin_relax(arch_spinlock_t *lock)
{
	arch_lock_relax(lock->lock);
}

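/*
 * The lock value of a cpu is the bitwise complement of its number, so
 * that cpu 0 does not collide with the "unlocked" value 0.
 */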
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
	return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	barrier();
	return likely(arch_spin_value_unlocked(*lp) &&
		      __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

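/*
 * Lock/trylock first attempt the inline compare-and-swap fast path and
 * only call into the out-of-line wait/retry code on contention.
 */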
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		return arch_spin_trylock_retry(lp);
	return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
	typecheck(int, lp->lock);
	/* On s390 a plain store of 0 is enough to release the lock. */
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (lp->lock)
		: "d" (0)
		: "cc", "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		arch_spin_relax(lock);
	smp_acquire__after_ctrl_dep();
}

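/*
 * Usage sketch (illustrative only, never compiled): how callers are
 * expected to pair these primitives. The function name is hypothetical.
 */
#if 0
static inline void spinlock_usage_sketch(arch_spinlock_t *lp)
{
	arch_spin_lock(lp);	/* cmpxchg fast path, wait loop on contention */
	/* ... critical section ... */
	arch_spin_unlock(lp);	/* plain store of 0 releases the lock */
}
#endif
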
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

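/*
 * Lock word layout: bit 31 (0x80000000) is the writer bit, the lower
 * bits count active readers; 0 means the lock is free.
 */
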
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);
	return likely(old >= 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
	int old = ACCESS_ONCE(rw->lock);
	return likely(old == 0 &&
		      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __RAW_OP_OR	"lao"
#define __RAW_OP_AND	"lan"
#define __RAW_OP_ADD	"laa"

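/*
 * The z196 interlocked-access facility provides load-and-or (lao),
 * load-and-and (lan) and load-and-add (laa): each atomically updates
 * the lock word and returns its previous value in one instruction.
 */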
#define __RAW_LOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		"bcr	14,0\n"				\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

#define __RAW_UNLOCK(ptr, op_val, op_string)		\
({							\
	int old_val;					\
							\
	typecheck(int *, ptr);				\
	asm volatile(					\
		op_string "	%0,%2,%1\n"		\
		: "=d" (old_val), "+Q" (*ptr)		\
		: "d" (op_val)				\
		: "cc", "memory");			\
	old_val;					\
})

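/*
 * Note: unlike __RAW_LOCK there is no "bcr 14,0" serialization here;
 * the acquire barrier is only needed when taking the lock.
 */
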
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
	if (old < 0)		/* writer bit was set, go to the wait loop */
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	__RAW_UNLOCK(&rw->lock, -1, __RAW_OP_ADD);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	int old;

	old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
	if (old != 0)		/* readers or another writer active */
		_raw_write_lock_wait(rw, old);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	rw->owner = 0;
	__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);	/* clear bit 31 */
}

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		_raw_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	int old;

	do {
		old = ACCESS_ONCE(rw->lock);
	} while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw))
		_raw_write_lock_wait(rw);
	rw->owner = SPINLOCK_LOCKVAL;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	typecheck(int, rw->lock);

	rw->owner = 0;
	asm volatile(
		"st	%1,%0\n"
		: "+Q" (rw->lock)
		: "d" (0)
		: "cc", "memory");
}

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	if (!arch_read_trylock_once(rw))
		return _raw_read_trylock_retry(rw);
	return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (!arch_write_trylock_once(rw) && !_raw_write_trylock_retry(rw))
		return 0;
	rw->owner = SPINLOCK_LOCKVAL;
	return 1;
}

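/*
 * The relax functions pass the owner cpu recorded in rw->owner to
 * arch_lock_relax(), which can yield to that cpu if it is preempted
 * (see arch_vcpu_is_preempted()).
 */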
static inline void arch_read_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

static inline void arch_write_relax(arch_rwlock_t *rw)
{
	arch_lock_relax(rw->owner);
}

#endif /* __ASM_SPINLOCK_H */