/* arch/alpha/include/asm/spinlock.h */
#ifndef _ALPHA_SPINLOCK_H
#define _ALPHA_SPINLOCK_H

#include <linux/kernel.h>
#include <asm/current.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */

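/*
 * The lock word is 0 when free and 1 when held: arch_spin_lock() takes
 * it with an ldl_l/stl_c sequence and arch_spin_unlock() releases it
 * with a plain store after a memory barrier.  The *_lock_flags()
 * variants ignore the flags argument and map to the plain operations.
 */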
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x)	((x)->lock != 0)

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

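/*
 * Release: the mb() makes the critical section's loads and stores
 * visible before the lock word is cleared.
 */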
static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
	mb();
	lock->lock = 0;
}

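/*
 * Acquire: load-locked the word, bail to the out-of-line loop in
 * .subsection 2 if it is already held, otherwise try to store 1 with
 * stl_c.  A failed store-conditional also falls back to the spin loop,
 * which rereads with a plain ldl until the lock looks free and then
 * branches back to retry.  The trailing mb orders the critical section
 * after the acquisition.
 */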
static inline void arch_spin_lock(arch_spinlock_t * lock)
{
	long tmp;

	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	bne	%0,2f\n"
	"	lda	%0,1\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	bne	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=&r" (tmp), "=m" (lock->lock)
	: "m"(lock->lock) : "memory");
}

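/*
 * Trylock is a single atomic test-and-set of bit 0 of the lock word;
 * it returns non-zero only if the bit was previously clear.
 */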
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return !test_and_set_bit(0, &lock->lock);
}

/***********************************************************/

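/*
 * Read/write lock encoding: bit 0 is set while a writer holds the lock;
 * each reader subtracts 2 on lock and adds 2 back on unlock, so the
 * reader count lives in the bits above bit 0.  E.g. three readers and
 * no writer leave the word at -6, while a lone writer leaves it at 1.
 * The lock is free for a writer only when the whole word is 0, and free
 * for readers whenever bit 0 is clear.
 */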
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return (lock->lock & 1) == 0;
}

static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->lock == 0;
}

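/*
 * arch_read_lock: spin out of line while a writer (low bit) is present,
 * then atomically subtract 2 to register this reader.
 */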
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	long regx;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	subl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}

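/*
 * arch_write_lock: spin out of line until the word is 0 (no readers,
 * no writer), then atomically store 1 to claim exclusive ownership.
 */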
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	long regx;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	bne	%1,6f\n"
	"	lda	%1,1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	bne	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}

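/*
 * The trylock variants preload the success flag with 0; it ends up
 * non-zero only if the store-conditional that takes the lock succeeds.
 * A failed stl_c retries the whole sequence out of line, while finding
 * the lock busy falls straight through to the mb and returns 0.
 */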
static inline int arch_read_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	blbs	%1,2f\n"
	"	subl	%1,2,%2\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}

static inline int arch_write_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	bne	%1,2f\n"
	"	lda	%2,1\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}

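/*
 * arch_read_unlock: issue the barrier first, then add 2 back with an
 * ldl_l/stl_c loop, since other readers (or a trylock) may be updating
 * the word concurrently.  Write unlock needs no loop: the writer owns
 * the word outright and can simply store 0 after the barrier.
 */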
static inline void arch_read_unlock(arch_rwlock_t * lock)
{
	long regx;
	__asm__ __volatile__(
	"	mb\n"
	"1:	ldl_l	%1,%0\n"
	"	addl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t * lock)
{
	mb();
	lock->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ALPHA_SPINLOCK_H */