#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <linux/atomic.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define arch_spin_lock_init(x)                  ((x)->lock = 0)

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 * The pad bits in the middle are used to prevent the next_ticket number
 * overflowing into the now_serving number.
 *
 *   31             17  16    15  14                    0
 *  +----------------------------------------------------+
 *  |  now_serving     | padding |   next_ticket         |
 *  +----------------------------------------------------+
 */

#define TICKET_SHIFT    17
#define TICKET_BITS     15
#define TICKET_MASK     ((1 << TICKET_BITS) - 1)

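/*
 * Worked example (illustrative only): starting from lock->lock == 0,
 * CPU A's fetchadd returns 0, so next_ticket and now_serving match and
 * A owns the lock; the word is now 1.  CPU B's fetchadd then returns 1:
 * its ticket (1) differs from now_serving (0), so B spins.  When A
 * unlocks, the word becomes 0x00020002 (now_serving == 1), B's ticket
 * matches, and B proceeds.
 */
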
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
        int     *p = (int *)&lock->lock, ticket, serve;

        /* Atomically take the next ticket (low 15 bits of the old value). */
        ticket = ia64_fetchadd(1, p, acq);

        /* Uncontended: our ticket already equals now_serving. */
        if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
                return;

        ia64_invala();

        for (;;) {
                /*
                 * ld4.c.nc is a check load: after the invala above it
                 * monitors the lock word through the ALAT, so the loop
                 * goes back to memory only once another CPU has written
                 * the line, rather than on every iteration.
                 */
                asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");

                if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
                        return;
                cpu_relax();
        }
}

static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
        int tmp = ACCESS_ONCE(lock->lock);

        /* Only attempt the cmpxchg if the lock looks free right now. */
        if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
                return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
        return 0;
}

static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
        unsigned short  *p = (unsigned short *)&lock->lock + 1, tmp;

        /*
         * now_serving occupies bits 1..15 of the upper halfword, so
         * adding 2 advances it by one ticket; the "& ~1" keeps the pad
         * bit (bit 16 of the word) clear.  ld2.bias hints that we want
         * exclusive ownership of the cacheline for the update.
         */
        asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
        ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
        long tmp = ACCESS_ONCE(lock->lock);

        return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
        long tmp = ACCESS_ONCE(lock->lock);

        /* More than one ticket outstanding means someone is waiting. */
        return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
        return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended  arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
        __ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        __ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
                                                  unsigned long flags)
{
        arch_spin_lock(lock);
}

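/*
 * Illustrative usage sketch (not part of this header): the generic
 * locking code layered above ends up doing the equivalent of
 *
 *	arch_spinlock_t l = { 0 };	-- unlocked, as arch_spin_lock_init()
 *	arch_spin_lock(&l);		-- take a ticket, wait for our turn
 *	...critical section...
 *	arch_spin_unlock(&l);		-- advance now_serving to the next waiter
 */
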
#define arch_read_can_lock(rw)          (*(volatile int *)(rw) >= 0)
#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)

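/*
 * Read-write lock layout (see asm/spinlock_types.h): bits 0..30 hold
 * read_counter and bit 31 is write_lock.  Hence the tests above: a
 * non-negative word means no writer holds the lock, and a zero word
 * means the lock is entirely free.
 */
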
#ifdef ASM_SUPPORTED

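/*
 * Read-lock with interrupt handling folded in: optimistically
 * fetchadd the reader count (label 3); if a writer holds the lock
 * (the count went negative), back the increment out and spin, with
 * interrupts re-enabled while waiting if "flags" says they were on
 * (the psr.i bit test at the top), then retry.
 */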
static __always_inline void
arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
        __asm__ __volatile__ (
                "tbit.nz p6, p0 = %1,%2\n"
                "br.few 3f\n"
                "1:\n"
                "fetchadd4.rel r2 = [%0], -1;;\n"
                "(p6) ssm psr.i\n"
                "2:\n"
                "hint @pause\n"
                "ld4 r2 = [%0];;\n"
                "cmp4.lt p7,p0 = r2, r0\n"
                "(p7) br.cond.spnt.few 2b\n"
                "(p6) rsm psr.i\n"
                ";;\n"
                "3:\n"
                "fetchadd4.acq r2 = [%0], 1;;\n"
                "cmp4.lt p7,p0 = r2, r0\n"
                "(p7) br.cond.spnt.few 1b\n"
                : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
                : "p6", "p7", "r2", "memory");
}

#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)

/*
 * Non-asm fallback: optimistically bump the reader count; if a writer
 * made it negative, back the increment out and spin until the writer
 * is gone.
 */
#define arch_read_lock(rw)                                                              \
do {                                                                                    \
        arch_rwlock_t *__read_lock_ptr = (rw);                                          \
                                                                                        \
        while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {          \
                ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);                        \
                while (*(volatile int *)__read_lock_ptr < 0)                            \
                        cpu_relax();                                                    \
        }                                                                               \
} while (0)

#endif /* !ASM_SUPPORTED */

#define arch_read_unlock(rw)                                    \
do {                                                            \
        arch_rwlock_t *__read_lock_ptr = (rw);                  \
        ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);        \
} while (0)

#ifdef ASM_SUPPORTED

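/*
 * Write-lock: r29 is built as 1 << 31 (the write_lock bit) and
 * installed with cmpxchg4.acq against an expected value of 0 (ar.ccv),
 * i.e. the lock must be completely free.  On failure, spin reading the
 * word until it is zero, re-enabling interrupts while waiting when
 * "flags" allows (same psr.i trick as the read path), then retry.
 */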
static __always_inline void
arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
        __asm__ __volatile__ (
                "tbit.nz p6, p0 = %1, %2\n"
                "mov ar.ccv = r0\n"
                "dep r29 = -1, r0, 31, 1\n"
                "br.few 3f;;\n"
                "1:\n"
                "(p6) ssm psr.i\n"
                "2:\n"
                "hint @pause\n"
                "ld4 r2 = [%0];;\n"
                "cmp4.eq p0,p7 = r0, r2\n"
                "(p7) br.cond.spnt.few 2b\n"
                "(p6) rsm psr.i\n"
                ";;\n"
                "3:\n"
                "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
                "cmp4.eq p0,p7 = r0, r2\n"
                "(p7) br.cond.spnt.few 1b;;\n"
                : : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
                : "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)

/* Single cmpxchg attempt: succeeds only if the lock word was zero. */
#define arch_write_trylock(rw)                                                  \
({                                                                              \
        register long result;                                                   \
                                                                                \
        __asm__ __volatile__ (                                                  \
                "mov ar.ccv = r0\n"                                             \
                "dep r29 = -1, r0, 31, 1;;\n"                                   \
                "cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"                         \
                : "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");          \
        (result == 0);                                                          \
})

static inline void arch_write_unlock(arch_rwlock_t *x)
{
        u8 *y = (u8 *)x;
        barrier();
        /*
         * write_lock is bit 31, which lives in the top byte of the
         * little-endian word: a release store of zero to that byte
         * drops the lock without disturbing the low read_counter bits.
         */
        asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory" );
}

#else /* !ASM_SUPPORTED */

#define arch_write_lock_flags(l, flags) arch_write_lock(l)

#define arch_write_lock(l)                                                              \
({                                                                                      \
        __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);                       \
        __u32 *ia64_write_lock_ptr = (__u32 *) (l);                                     \
        do {                                                                            \
                while (*ia64_write_lock_ptr)                                            \
                        ia64_barrier();                                                 \
                ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);     \
        } while (ia64_val);                                                             \
})

#define arch_write_trylock(rw)                                          \
({                                                                      \
        __u64 ia64_val;                                                 \
        __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);                 \
        ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);   \
        (ia64_val == 0);                                                \
})

static inline void arch_write_unlock(arch_rwlock_t *x)
{
        barrier();
        x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

static inline int arch_read_trylock(arch_rwlock_t *x)
{
        union {
                arch_rwlock_t lock;
                __u32 word;
        } old, new;
        old.lock = new.lock = *x;
        /* Expect no writer; the new value carries one more reader. */
        old.lock.write_lock = new.lock.write_lock = 0;
        ++new.lock.read_counter;
        return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}

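/*
 * Illustrative example: with two readers and no writer the word is 2,
 * so arch_read_trylock() attempts cmpxchg(old=2, new=3) and succeeds.
 * If a writer slipped in first, the word has bit 31 set, the compare
 * against "old" (which has write_lock forced to zero) fails, and the
 * function returns 0 without touching the lock.
 */
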
#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* _ASM_IA64_SPINLOCK_H */