/* MN10300 spinlock support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/rwlock.h>
#include <asm/page.h>

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions; fairness would carry a cost.
 */

#define arch_spin_is_locked(x)  (*(volatile signed char *)(&(x)->slock) != 0)

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        /*
         * bclr atomically clears the lock bit (mask 1); the "memory"
         * clobber stops the compiler migrating critical-section
         * accesses past the unlock.
         */
        asm volatile(
                "       bclr    1,(0,%0)        \n"
                :
                : "a"(&lock->slock)
                : "memory", "cc");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        int ret;

        /*
         * bset atomically sets the lock bit and leaves the Z flag set
         * iff the bit was previously clear; the clr/xor sequence then
         * turns that into ret = 1 (acquired) or ret = 0 (already held).
         */
        asm volatile(
                "       mov     1,%0            \n"
                "       bset    %0,(%1)         \n"
                "       bne     1f              \n"
                "       clr     %0              \n"
                "1:     xor     1,%0            \n"
                : "=d"(ret)
                : "a"(&lock->slock)
                : "memory", "cc");

        return ret;
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        /* keep re-issuing the atomic bset until it finds the bit clear */
        asm volatile(
                "1:     bset    1,(0,%0)        \n"
                "       bne     1b              \n"
                :
                : "a"(&lock->slock)
                : "memory", "cc");
}
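
/*
 * Illustrative sketch only, not part of this header's API: the
 * bset/bclr sequences above behave roughly like the test-and-set code
 * below.  The __sketch_* names and the use of the GCC __atomic
 * builtins are assumptions for illustration; on MN10300 the atomicity
 * comes from the bset/bclr instructions themselves.
 */
static inline void __sketch_spin_lock(volatile unsigned int *slock)
{
        /* atomically set bit 0; spin for as long as it was already set */
        while (__atomic_fetch_or(slock, 1U, __ATOMIC_ACQUIRE) & 1)
                ;
}

static inline int __sketch_spin_trylock(volatile unsigned int *slock)
{
        /* succeed (return 1) only if we were the ones to set bit 0 */
        return !(__atomic_fetch_or(slock, 1U, __ATOMIC_ACQUIRE) & 1);
}

static inline void __sketch_spin_unlock(volatile unsigned int *slock)
{
        /* atomically clear bit 0, releasing the lock */
        __atomic_fetch_and(slock, ~1U, __ATOMIC_RELEASE);
}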

static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
                                         unsigned long flags)
{
        int temp;

        /*
         * Spin with the caller's interrupt state (flags) restored so
         * that pending IRQs can be serviced while we wait, then mask
         * interrupts again (EPSW_IE | MN10300_CLI_LEVEL) before each
         * retry; the two nops cover the EPSW update hazard.
         */
        asm volatile(
                "1:     bset    1,(0,%2)        \n"
                "       beq     3f              \n"
                "       mov     %1,epsw         \n"
                "2:     mov     (0,%2),%0       \n"
                "       or      %0,%0           \n"
                "       bne     2b              \n"
                "       mov     %3,%0           \n"
                "       mov     %0,epsw         \n"
                "       nop                     \n"
                "       nop                     \n"
                "       bra     1b              \n"
                "3:                             \n"
                : "=&d" (temp)
                : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
                : "memory", "cc");
}
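
/*
 * Illustrative sketch only: arch_spin_lock_flags() above is the
 * classic "spin with the caller's interrupt state restored" pattern.
 * The __sketch_* name is an assumption for illustration; the IRQ
 * manipulation itself exists only in the real asm, as noted in the
 * comments below.
 */
static inline void __sketch_spin_lock_flags(volatile unsigned int *slock,
                                            unsigned long flags)
{
        (void)flags;    /* consumed only by the real asm, via EPSW */

        for (;;) {
                /* try the atomic test-and-set */
                if (!(__atomic_fetch_or(slock, 1U, __ATOMIC_ACQUIRE) & 1))
                        return;
                /*
                 * the real asm restores the caller's flags into EPSW
                 * here, so pending interrupts get serviced while...
                 */
                while (*slock)  /* ...we wait for the lock to look free */
                        ;
                /*
                 * ...then masks interrupts again (EPSW_IE |
                 * MN10300_CLI_LEVEL) before retrying
                 */
        }
}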

#ifdef __KERNEL__

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
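
/*
 * Sketch of the mixed usage the note above describes (illustrative
 * only; the generic read_lock()/write_lock_irqsave() wrappers are the
 * normal entry points, not the arch_* functions):
 *
 *	reader, may run in IRQ context:
 *		read_lock(&lock);
 *		...
 *		read_unlock(&lock);
 *
 *	writer, process context, must block interrupts:
 *		write_lock_irqsave(&lock, flags);
 *		...
 *		write_unlock_irqrestore(&lock, flags);
 */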

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock > 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)

/*
 * On mn10300, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
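
/*
 * Worked example of the counter arithmetic (RW_LOCK_BIAS comes from
 * <asm/rwlock.h>; 0x01000000 is the conventional value):
 *
 *	unlocked:		count == RW_LOCK_BIAS
 *	N readers:		count == RW_LOCK_BIAS - N	(still > 0)
 *	writer:			count == 0			(BIAS subtracted)
 *	writer contending
 *	with N readers:		count == -N			(sign bit set)
 *
 * which is why arch_read_can_lock() tests count > 0 and
 * arch_write_can_lock() tests count == RW_LOCK_BIAS.
 */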
static inline void arch_read_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
        __build_read_lock(rw, "__read_lock_failed");
#else
        {
                atomic_t *count = (atomic_t *)rw;
                /*
                 * a negative result means a writer holds the lock;
                 * back the decrement out and try again
                 */
                while (atomic_dec_return(count) < 0)
                        atomic_inc(count);
        }
#endif
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
        __build_write_lock(rw, "__write_lock_failed");
#else
        {
                atomic_t *count = (atomic_t *)rw;
                /*
                 * the subtraction leaves zero only if no readers and
                 * no writer held the lock; otherwise restore the bias
                 * and try again
                 */
                while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
                        atomic_add(RW_LOCK_BIAS, count);
        }
#endif
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
        __build_read_unlock(rw);
#else
        {
                atomic_t *count = (atomic_t *)rw;
                atomic_inc(count);
        }
#endif
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
        __build_write_unlock(rw);
#else
        {
                atomic_t *count = (atomic_t *)rw;
                atomic_add(RW_LOCK_BIAS, count);
        }
#endif
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;

        /*
         * test the decremented value directly rather than re-reading
         * the counter, so a concurrent update between the decrement
         * and the test cannot give a stale answer
         */
        if (atomic_dec_return(count) >= 0)
                return 1;
        atomic_inc(count);
        return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
        atomic_t *count = (atomic_t *)lock;

        /*
         * succeeds only if the counter was exactly RW_LOCK_BIAS,
         * i.e. no readers and no writer
         */
        if (atomic_sub_and_test(RW_LOCK_BIAS, count))
                return 1;
        atomic_add(RW_LOCK_BIAS, count);
        return 0;
}
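
/*
 * Illustrative caller pattern (a sketch; kernel code normally reaches
 * these operations through the generic write_trylock() wrapper rather
 * than calling the arch_* functions directly):
 */
static inline int __sketch_try_update(arch_rwlock_t *rw)
{
        if (!arch_write_trylock(rw))
                return 0;       /* readers or another writer hold it */
        /* ... exclusive access here ... */
        arch_write_unlock(rw);
        return 1;
}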

#define arch_read_lock_flags(lock, flags)  arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define _raw_spin_relax(lock)   cpu_relax()
#define _raw_read_relax(lock)   cpu_relax()
#define _raw_write_relax(lock)  cpu_relax()

#endif /* __KERNEL__ */
#endif /* _ASM_SPINLOCK_H */