/*
 * include/asm-sh/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
#define arch_spin_is_locked(x) ((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
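
/*
 * The lock word holds 1 when the lock is free; arch_spin_lock() below
 * stores 0 to take it, so any value <= 0 means "locked", which is why
 * arch_spin_is_locked() tests for <= 0.
 */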
/*
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp;
        unsigned long oldval;

        /* Retry the movli.l/movco.l (load-locked/store-conditional) pair
         * until the store succeeds and the old value was positive, i.e.
         * the lock was free when we claimed it. */
        __asm__ __volatile__ (
                "1: \n\t"
                "movli.l @%2, %0 ! arch_spin_lock \n\t"
                "mov %0, %1 \n\t"
                "mov #0, %0 \n\t"
                "movco.l %0, @%2 \n\t"
                "bf 1b \n\t"
                "cmp/pl %1 \n\t"
                "bf 1b \n\t"
                : "=&z" (tmp), "=&r" (oldval)
                : "r" (&lock->lock)
                : "t", "memory"
        );
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        /* A plain store of 1 releases the lock. */
        __asm__ __volatile__ (
                "mov #1, %0 ! arch_spin_unlock \n\t"
                "mov.l %0, @%1 \n\t"
                : "=&z" (tmp)
                : "r" (&lock->lock)
                : "t", "memory"
        );
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long tmp, oldval;

        /* Single ll/sc attempt; the old lock value (nonzero if the lock
         * was free and is now ours) is the return value. */
        __asm__ __volatile__ (
                "1: \n\t"
                "movli.l @%2, %0 ! arch_spin_trylock \n\t"
                "mov %0, %1 \n\t"
                "mov #0, %0 \n\t"
                "movco.l %0, @%2 \n\t"
                "bf 1b \n\t"
                "synco \n\t"
                : "=&z" (tmp), "=&r" (oldval)
                : "r" (&lock->lock)
                : "t", "memory"
        );

        return oldval;
}
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
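
/*
 * A sketch of the "mixed" pattern above (illustrative, with a
 * hypothetical "my_rwlock"): readers may take the plain read lock even
 * in interrupt context, while writers disable interrupts so an in-irq
 * reader cannot deadlock against a writer on the same CPU:
 *
 *        static DEFINE_RWLOCK(my_rwlock);
 *
 *        (interrupt handler, reader)
 *        read_lock(&my_rwlock);
 *        ...
 *        read_unlock(&my_rwlock);
 *
 *        (process context, writer)
 *        write_lock_irqsave(&my_rwlock, flags);
 *        ...
 *        write_unlock_irqrestore(&my_rwlock, flags);
 */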
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((x)->lock > 0)
/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
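
/*
 * The rwlock word is a counter biased at RW_LOCK_BIAS when free: each
 * reader subtracts 1, and a writer subtracts the entire bias. So the
 * tests above read "some reader slots left" and "completely free",
 * respectively. For example (values illustrative):
 *
 *        RW_LOCK_BIAS            unlocked
 *        RW_LOCK_BIAS - 2        held by two readers
 *        0                       held by a writer
 */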
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        /* Wait until the count is positive, then take one reader slot. */
        __asm__ __volatile__ (
                "1: \n\t"
                "movli.l @%1, %0 ! arch_read_lock \n\t"
                "cmp/pl %0 \n\t"
                "bf 1b \n\t"
                "add #-1, %0 \n\t"
                "movco.l %0, @%1 \n\t"
                "bf 1b \n\t"
                : "=&z" (tmp)
                : "r" (&rw->lock)
                : "t", "memory"
        );
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        /* Give the reader slot back. */
        __asm__ __volatile__ (
                "1: \n\t"
                "movli.l @%1, %0 ! arch_read_unlock \n\t"
                "add #1, %0 \n\t"
                "movco.l %0, @%1 \n\t"
                "bf 1b \n\t"
                : "=&z" (tmp)
                : "r" (&rw->lock)
                : "t", "memory"
        );
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        /* Wait until the count equals the full bias (no readers, no
         * writer), then subtract the whole bias to claim the lock. */
        __asm__ __volatile__ (
                "1: \n\t"
                "movli.l @%1, %0 ! arch_write_lock \n\t"
                "cmp/hs %2, %0 \n\t"
                "bf 1b \n\t"
                "sub %2, %0 \n\t"
                "movco.l %0, @%1 \n\t"
                "bf 1b \n\t"
                : "=&z" (tmp)
                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
                : "t", "memory"
        );
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        /* Restore the full bias with a plain store. */
        __asm__ __volatile__ (
                "mov.l %1, @%0 ! arch_write_unlock \n\t"
                :
                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
                : "t", "memory"
        );
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned long tmp, oldval;

        /* Take a reader slot only if the count is positive; otherwise
         * bail out to label 2: and report failure via oldval. */
        __asm__ __volatile__ (
                "1: \n\t"
                "movli.l @%2, %0 ! arch_read_trylock \n\t"
                "mov %0, %1 \n\t"
                "cmp/pl %0 \n\t"
                "bf 2f \n\t"
                "add #-1, %0 \n\t"
                "movco.l %0, @%2 \n\t"
                "bf 1b \n\t"
                "2: \n\t"
                "synco \n\t"
                : "=&z" (tmp), "=&r" (oldval)
                : "r" (&rw->lock)
                : "t", "memory"
        );

        return (oldval > 0);
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned long tmp, oldval;

        /* Subtract the full bias only if the lock is completely free;
         * the old value tells the caller whether that succeeded. */
        __asm__ __volatile__ (
                "1: \n\t"
                "movli.l @%2, %0 ! arch_write_trylock \n\t"
                "mov %0, %1 \n\t"
                "cmp/hs %3, %0 \n\t"
                "bf 2f \n\t"
                "sub %3, %0 \n\t"
                "2: \n\t"
                "movco.l %0, @%2 \n\t"
                "bf 1b \n\t"
                "synco \n\t"
                : "=&z" (tmp), "=&r" (oldval)
                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
                : "t", "memory"
        );

        return (oldval > (RW_LOCK_BIAS - 1));
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SH_SPINLOCK_LLSC_H */