arch/s390/include/asm/spinlock.h
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

/*
 * Atomic compare-and-swap built on the COMPARE AND SWAP (CS) instruction:
 * if *lock still holds the expected value 'old', store 'new' into it;
 * otherwise 'old' is updated with the current contents of *lock.
 * Returns 1 if the swap was performed, 0 otherwise.
 */
static inline int
_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
{
        unsigned int old_expected = old;

        asm volatile(
                "       cs      %0,%3,%1"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
                : "cc", "memory" );
        return old == old_expected;
}

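/*
 * Illustrative sketch, not part of the original header: the primitive above
 * is typically used in a read / modify / compare-and-swap retry loop, as the
 * rwlock helpers further down do.  example_atomic_add() is a hypothetical
 * name used only for this example.
 */
static inline void example_atomic_add(unsigned int *counter, unsigned int delta)
{
        unsigned int old;

        do {    /* re-read and retry until the CAS succeeds unchanged */
                old = ACCESS_ONCE(*counter);
        } while (!_raw_compare_and_swap(counter, old, old + delta));
}
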
/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions.  Fairness would have a cost.
 *
 * (The type definitions are in asm/spinlock_types.h.  An illustrative
 * usage sketch follows arch_spin_unlock_wait() below.)
 */

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_relax(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);

/*
 * The per-cpu lock value is the bitwise complement of the CPU number, so it
 * is never 0 for a valid CPU: a lock word of 0 always means "unlocked" and
 * any other value identifies the owner.  SPINLOCK_LOCKVAL is the current
 * CPU's value, kept in the lowcore.
 */
static inline u32 arch_spin_lockval(int cpu)
{
        return ~cpu;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return ACCESS_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        barrier();
        return likely(arch_spin_value_unlocked(*lp) &&
                      _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
{
        return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
                                        unsigned long flags)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait_flags(lp, flags);
}

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        arch_spin_tryrelease_once(lp);
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        while (arch_spin_is_locked(lock))
                arch_spin_relax(lock);
}

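/*
 * Illustrative sketch, not part of the original header: how the operations
 * above are combined.  Generic kernel code takes these locks through the
 * spin_lock()/spin_unlock() family of wrappers, which eventually reach the
 * arch_* hooks defined here; example_update() is a hypothetical name.
 */
static inline void example_update(arch_spinlock_t *lp)
{
        arch_spin_lock(lp);     /* fast path: one CAS from 0 to this CPU's lockval */
        /* ... critical section ... */
        arch_spin_unlock(lp);   /* CAS the lockval back to 0 */
}
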
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * The lock word holds the writer bit (0x80000000) plus the reader count;
 * it is 0 when the lock is free.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers.  For those circumstances we can "mix" irq-safe locks: any writer
 * needs to take an irq-safe write-lock, but readers can take non-irqsafe
 * read-locks.  (An illustrative sketch follows arch_write_trylock() below.)
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock >= 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == 0)

extern void _raw_read_lock_wait(arch_rwlock_t *lp);
extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
extern void _raw_write_lock_wait(arch_rwlock_t *lp);
extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags);
extern int _raw_write_trylock_retry(arch_rwlock_t *lp);

static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
        unsigned int old = ACCESS_ONCE(rw->lock);
        /* no writer present (sign bit clear): bump the reader count */
        return likely((int) old >= 0 &&
                      _raw_compare_and_swap(&rw->lock, old, old + 1));
}

static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
        unsigned int old = ACCESS_ONCE(rw->lock);
        /* lock word must be 0 (no readers, no writer): set the writer bit */
        return likely(old == 0 &&
                      _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                _raw_read_lock_wait(rw);
}

static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
        if (!arch_read_trylock_once(rw))
                _raw_read_lock_wait_flags(rw, flags);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int old;

        do {
                old = ACCESS_ONCE(rw->lock);
        } while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw))
                _raw_write_lock_wait(rw);
}

static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags)
{
        if (!arch_write_trylock_once(rw))
                _raw_write_lock_wait_flags(rw, flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        _raw_compare_and_swap(&rw->lock, 0x80000000, 0);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        if (!arch_read_trylock_once(rw))
                return _raw_read_trylock_retry(rw);
        return 1;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        if (!arch_write_trylock_once(rw))
                return _raw_write_trylock_retry(rw);
        return 1;
}

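/*
 * Illustrative sketch, not part of the original header, following the NOTE
 * above the rwlock operations: a reader that may run in interrupt context
 * can take a plain read lock, while writers must run with interrupts
 * disabled so that an interrupt reader cannot deadlock against a writer on
 * the same CPU.  Real kernel code does this through the generic
 * read_lock()/write_lock_irqsave() wrappers; example_reader() and
 * example_writer() are hypothetical names.
 */
static inline void example_reader(arch_rwlock_t *rw)
{
        arch_read_lock(rw);             /* no need to disable interrupts here */
        /* ... read the shared data ... */
        arch_read_unlock(rw);
}

static inline void example_writer(arch_rwlock_t *rw, unsigned long flags)
{
        /* caller has already disabled interrupts and saved them in 'flags' */
        arch_write_lock_flags(rw, flags);
        /* ... modify the shared data ... */
        arch_write_unlock(rw);
}
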
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* __ASM_SPINLOCK_H */