/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/barrier.h>
#include <asm/processor.h> /* for cpu_relax */

#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"		/* atomic test-and-set: old byte -> %g2 */
	"orcc	%%g2, 0x0, %%g0\n\t"	/* was it already held? */
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"		/* out-of-line spin loop */
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"		/* re-read with a plain load while held */
	"b,a	1b\n\t"			/* looks free: retry the ldstub */
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);	/* zero means the lock byte was clear and is now ours */
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
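
/*
 * Usage sketch (ours, hypothetical caller): trylock returns non-zero on
 * success, so callers can attempt the lock without committing to spin.
 */
#if 0	/* illustrative sketch, never compiled */
static void example_spin_usage(arch_spinlock_t *lock)
{
	if (!arch_spin_trylock(lock))
		arch_spin_lock(lock);	/* fall back to spinning */
	/* ... critical section ... */
	arch_spin_unlock(lock);
}
#endif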

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  arch_rwlock_t
 *	------------------------------------
 *	 31                       8 7     0
 *
 * wlock signifies that the one writer is in, or that somebody is
 * updating the counter. A writer that successfully acquires the wlock
 * but finds the counter non-zero must release the lock and wait until
 * both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
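
/*
 * Illustration only (our C model, hypothetical names): with wlock in the
 * low byte of a big-endian word, (count << 8) | 0xff plus 1 carries out
 * of the wlock byte, so a single store bumps the counter and releases
 * wlock at once; subtracting 0x1ff undoes it. The real helpers
 * (___rw_read_enter and friends) are hand-written assembly in
 * arch/sparc/lib/locks.S.
 */
#if 0	/* illustrative sketch, never compiled */
static inline void example_read_lock_model(volatile unsigned int *word)
{
	volatile unsigned char *wlock = (volatile unsigned char *)word + 3;

	while (__atomic_test_and_set((void *)wlock, __ATOMIC_ACQUIRE))
		;		/* wait out a writer or a counter update */
	*word += 1;		/* (count+1) << 8, wlock byte back to 0 */
}

static inline void example_read_unlock_model(volatile unsigned int *word)
{
	volatile unsigned char *wlock = (volatile unsigned char *)word + 3;

	while (__atomic_test_and_set((void *)wlock, __ATOMIC_ACQUIRE))
		;
	*word -= 0x1ff;		/* (count-1) << 8, wlock byte back to 0 */
}
#endif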

static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"		/* save return address for the helper */
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"	/* delay slot: try to grab wlock */
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;	/* mark the whole word write-held */
}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
"	st		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	/* Try to grab the wlock byte first. */
	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			/* Readers still in: back off, release the wlock byte. */
			((volatile u8*)&rw->lock)[3] = 0;
		else
			/* No readers: convert to a full write lock. */
			*(volatile u32*)&rw->lock = ~0U;
	}

	return (val == 0);
}
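
/*
 * Illustration only (ours): per the comment block above, a writer that
 * grabs wlock but sees readers must back off and retry. Expressed on
 * top of arch_write_trylock(), that wait is just a relaxation loop.
 */
#if 0	/* illustrative sketch, never compiled */
static inline void example_write_lock_model(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();	/* readers or another writer still in */
}
#endif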

static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_try\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})
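
/*
 * Usage sketch (ours, hypothetical caller): arch_read_trylock() is a
 * statement expression yielding __arch_read_trylock()'s result, with
 * interrupts disabled around the attempt.
 */
#if 0	/* illustrative sketch, never compiled */
static int example_try_read(arch_rwlock_t *rw)
{
	if (!arch_read_trylock(rw))
		return 0;	/* writer in, or counter being updated */
	/* ... read-side critical section ... */
	arch_read_unlock(rw);
	return 1;
}
#endif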

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
#define arch_write_lock_flags(rw, flags) arch_write_lock(rw)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
#define arch_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */