/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

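/*
 * With CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK enabled, trap via
 * SPINLOCK_BREAK_INSN when the lock word looks corrupted: ANDCM computes
 * lock_val & ~__ARCH_SPIN_LOCK_UNLOCKED_VAL, and the ,= completer nullifies
 * the break when the result is zero, i.e. for the two valid values
 * (0 = locked, __ARCH_SPIN_LOCK_UNLOCKED_VAL = unlocked).
 */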
static inline void arch_spin_val_check(int lock_val)
{
        if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
                asm volatile(   "andcm,= %0,%1,%%r0\n"
                                ".word %2\n"
                : : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
                        "i" (SPINLOCK_BREAK_INSN));
}

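/*
 * The lock word holds __ARCH_SPIN_LOCK_UNLOCKED_VAL while the lock is free
 * and 0 while it is held.  __ldcw_align() returns the properly aligned word
 * inside arch_spinlock_t that LDCW operates on (LDCW historically requires
 * a 16-byte aligned address).
 */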
static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
        volatile unsigned int *a;
        int lock_val;

        a = __ldcw_align(x);
        lock_val = READ_ONCE(*a);
        arch_spin_val_check(lock_val);
        return (lock_val == 0);
}

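/*
 * __ldcw() atomically loads the lock word and clears it to zero.  A nonzero
 * old value means the lock was free and is now ours; zero means another CPU
 * holds it, so spin with plain loads until it is released and retry.
 */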
static inline void arch_spin_lock(arch_spinlock_t *x)
{
        volatile unsigned int *a;

        a = __ldcw_align(x);
        do {
                int lock_val_old;

                lock_val_old = __ldcw(a);
                arch_spin_val_check(lock_val_old);
                if (lock_val_old)
                        return; /* got lock */
                /* spin with plain loads until the lock is released, then retry the LDCW */
                while (*a == 0)
                        continue;
        } while (1);
}

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
        volatile unsigned int *a;

        a = __ldcw_align(x);
        /*
         * Release with an ordered store; on PA 2.0 the ,ma completer with a
         * zero displacement is the ordered-store encoding (stw,o).
         */
        __asm__ __volatile__("stw,ma %0,0(%1)"
                : : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
}

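/*
 * One-shot LDCW attempt: a nonzero old value means the lock was free and we
 * now hold it; zero means some other CPU already holds it.
 */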
static inline int arch_spin_trylock(arch_spinlock_t *x)
{
        volatile unsigned int *a;
        int lock_val;

        a = __ldcw_align(x);
        lock_val = __ldcw(a);
        arch_spin_val_check(lock_val);
        return lock_val != 0;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by readers.
 *
 * The read-write lock state itself is held in @counter and access to it is
 * serialized with @lock_mutex.
 */
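/*
 * @counter values, as used by the code below:
 *   __ARCH_RW_LOCK_UNLOCKED__              lock is free
 *   below that, but above zero             held by readers (one decrement each)
 *   0                                      held by a writer
 */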

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * Zero means a writer holds the lock exclusively: deny the reader.
         * Otherwise grant the lock to the first/subsequent reader.
         */
        if (rw->counter > 0) {
                rw->counter--;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
         * deny the writer; otherwise, if unlocked, grant it to the writer.
         * Hence the claim that Linux rwlocks are unfair to writers:
         * they can be starved for an indefinite time by readers.
         */
        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
                rw->counter = 0;
                ret = 1;
        }
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}

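/*
 * Both lock operations simply spin on the corresponding trylock.  Interrupts
 * are disabled only inside each trylock attempt, not while spinning here.
 */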
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (!arch_read_trylock(rw))
                cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                cpu_relax();
}

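/* A reader drops the lock by giving its decrement back under @lock_mutex. */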
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter++;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

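/* The writer restores @counter to its unlocked value under @lock_mutex. */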
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */