#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	__ALT_SMP_ASM(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif

#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))

static inline void dsb_sev(void)
{
	dsb(ishst);
	__asm__(SEV);
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	prefetchw(&lock->slock);
	/* atomically take the next ticket, remembering the old value */
	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	/* wait (with WFE) until our ticket comes up */
	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

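/*
 * Illustrative sketch only -- not part of this header and compiled out.
 * It shows, in portable C11 atomics, roughly what the ldrex/strex fast
 * path above implements.  The example_* names and the 16/16-bit
 * next/owner split are assumptions made for the illustration (they
 * mirror TICKET_SHIFT being 16), and the real code waits with WFE
 * rather than busy-spinning.
 */
#if 0
#include <stdatomic.h>
#include <stdint.h>

typedef struct {
	_Atomic uint16_t next;	/* next ticket to hand out */
	_Atomic uint16_t owner;	/* ticket currently being served */
} example_ticket_lock_t;

static void example_ticket_lock(example_ticket_lock_t *lock)
{
	/* grab a ticket: the ldrex/add/strex loop above does this atomically */
	uint16_t ticket = atomic_fetch_add_explicit(&lock->next, 1,
						    memory_order_relaxed);

	/* wait until the owner counter reaches our ticket (acquire ordering) */
	while (atomic_load_explicit(&lock->owner,
				    memory_order_acquire) != ticket)
		;
}

static void example_ticket_unlock(example_ticket_lock_t *lock)
{
	/* release ordering, then pass the lock to the next ticket holder */
	atomic_fetch_add_explicit(&lock->owner, 1, memory_order_release);
}
#endif
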
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	prefetchw(&lock->slock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = READ_ONCE(lock->tickets);

	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		(ACCESS_ONCE((x)->lock) == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	prefetchw(&rw->lock);
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	prefetchw(&rw->lock);
	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

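/*
 * Illustrative sketch only -- not part of this header and compiled out.
 * A C11 rendering of the read-lock scheme described in the comment above
 * arch_read_lock(): readers increment the word and back off if bit 31 is
 * set (a writer holds the lock, so the count would go negative).  The
 * example_* names are assumptions made for the illustration.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool example_read_trylock(_Atomic uint32_t *lock)
{
	uint32_t old = atomic_load_explicit(lock, memory_order_relaxed);

	do {
		/* writer present: the lock word is "negative", give up */
		if (old & 0x80000000u)
			return false;
	} while (!atomic_compare_exchange_weak_explicit(lock, &old, old + 1,
							memory_order_acquire,
							memory_order_relaxed));

	return true;
}

static void example_read_unlock(_Atomic uint32_t *lock)
{
	/* drop our reader count; the last reader lets a waiting writer in */
	atomic_fetch_sub_explicit(lock, 1, memory_order_release);
}
#endif
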
/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		(ACCESS_ONCE((x)->lock) < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */