1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __ASM_SPINLOCK_H
3 #define __ASM_SPINLOCK_H
#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>
10 static inline void arch_spin_val_check(int lock_val)
12 if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
13 asm volatile( "andcm,= %0,%1,%%r0\n"
15 : : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
16 "i" (SPINLOCK_BREAK_INSN));
19 static inline int arch_spin_is_locked(arch_spinlock_t *x)
21 volatile unsigned int *a;
25 lock_val = READ_ONCE(*a);
26 arch_spin_val_check(lock_val);
27 return (lock_val == 0);
30 static inline void arch_spin_lock(arch_spinlock_t *x)
32 volatile unsigned int *a;
38 lock_val_old = __ldcw(a);
39 arch_spin_val_check(lock_val_old);
41 return; /* got lock */
43 /* wait until we should try to get lock again */
49 static inline void arch_spin_unlock(arch_spinlock_t *x)
51 volatile unsigned int *a;
54 /* Release with ordered store. */
55 __asm__ __volatile__("stw,ma %0,0(%1)"
56 : : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
59 static inline int arch_spin_trylock(arch_spinlock_t *x)
61 volatile unsigned int *a;
66 arch_spin_val_check(lock_val);
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */
78 /* 1 - lock taken successfully */
79 static inline int arch_read_trylock(arch_rwlock_t *rw)
84 local_irq_save(flags);
85 arch_spin_lock(&(rw->lock_mutex));
88 * zero means writer holds the lock exclusively, deny Reader.
89 * Otherwise grant lock to first/subseq reader
91 if (rw->counter > 0) {
96 arch_spin_unlock(&(rw->lock_mutex));
97 local_irq_restore(flags);
102 /* 1 - lock taken successfully */
103 static inline int arch_write_trylock(arch_rwlock_t *rw)
108 local_irq_save(flags);
109 arch_spin_lock(&(rw->lock_mutex));
112 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
113 * deny writer. Otherwise if unlocked grant to writer
114 * Hence the claim that Linux rwlocks are unfair to writers.
115 * (can be starved for an indefinite time by readers).
117 if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
121 arch_spin_unlock(&(rw->lock_mutex));
122 local_irq_restore(flags);
127 static inline void arch_read_lock(arch_rwlock_t *rw)
129 while (!arch_read_trylock(rw))
133 static inline void arch_write_lock(arch_rwlock_t *rw)
135 while (!arch_write_trylock(rw))
139 static inline void arch_read_unlock(arch_rwlock_t *rw)
143 local_irq_save(flags);
144 arch_spin_lock(&(rw->lock_mutex));
146 arch_spin_unlock(&(rw->lock_mutex));
147 local_irq_restore(flags);
150 static inline void arch_write_unlock(arch_rwlock_t *rw)
154 local_irq_save(flags);
155 arch_spin_lock(&(rw->lock_mutex));
156 rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
157 arch_spin_unlock(&(rw->lock_mutex));
158 local_irq_restore(flags);
161 #endif /* __ASM_SPINLOCK_H */