#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bitops.h> /* for LOCK_PREFIX */

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 *       need to test for the feature in boot_cpu_data.
 */

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * The semantics of cmpxchg8b are a bit strange, which is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNow!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		LOCK_PREFIX "cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		:	"D"(ptr),
			"b"(low),
			"c"(high)
		:	"ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
						 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
			 unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )

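/*
 * Usage sketch (illustrative only; shared_stamp and publish_stamp are
 * made-up names, not part of this header): set_64bit() stores a full
 * 64-bit value in one atomic step, so a concurrent reader never sees a
 * torn half-old/half-new pair of 32-bit words.
 *
 *	static unsigned long long shared_stamp;
 *
 *	static inline void publish_stamp(unsigned long long stamp)
 *	{
 *		set_64bit(&shared_stamp, stamp);
 *	}
 */
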
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *	  but strictly speaking the constraints are invalid: *ptr is really
 *	  an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__ __volatile__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}

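/*
 * Usage sketch (illustrative only; pending_work and claim_work are made-up
 * names, not part of this header): xchg() swaps in a new value and returns
 * the previous one atomically, which is handy for "take and clear" flags.
 *
 *	static unsigned long pending_work;
 *
 *	static inline unsigned long claim_work(void)
 *	{
 *		return xchg(&pending_work, 0UL);
 *	}
 */
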
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr, o, n)						      \
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	      \
					(unsigned long)(n), sizeof(*(ptr))))
#define sync_cmpxchg(ptr, o, n)						      \
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	      \
					(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg_local(ptr, o, n)					      \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),      \
					(unsigned long)(n), sizeof(*(ptr))))
#endif

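/*
 * Usage sketch (illustrative only; counter and add_to_counter are made-up
 * names, not part of this header): cmpxchg() only stores the new value if
 * the location still holds the expected old one, so callers typically
 * retry in a loop until no other CPU raced in between.
 *
 *	static unsigned long counter;
 *
 *	static inline void add_to_counter(unsigned long delta)
 *	{
 *		unsigned long old, new;
 *
 *		do {
 *			old = counter;
 *			new = old + delta;
 *		} while (cmpxchg(&counter, old, new) != old);
 *	}
 */
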
#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						      \
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o),     \
					(unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					      \
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\
					(unsigned long long)(n)))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * isn't.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__("lock; cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

static inline unsigned long __cmpxchg_local(volatile void *ptr,
			unsigned long old, unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__("cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

static inline unsigned long long __cmpxchg64(volatile void *ptr,
			unsigned long long old, unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}

static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
			unsigned long long old, unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__("cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}

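/*
 * Usage sketch (illustrative only; shared_count64 and add_to_count64 are
 * made-up names, not part of this header): cmpxchg64() performs the same
 * compare-and-exchange on a 64-bit quantity via cmpxchg8b, and is likewise
 * retried until the update lands without interference.
 *
 *	static unsigned long long shared_count64;
 *
 *	static inline void add_to_count64(unsigned long long delta)
 *	{
 *		unsigned long long old, new;
 *
 *		do {
 *			old = shared_count64;
 *			new = old + delta;
 *		} while (cmpxchg64(&shared_count64, old, new) != old);
 *	}
 */
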
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386. It may be necessary to
 * simulate the cmpxchg on the 80386 CPU. For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 and 80486. It may be
 * necessary to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */

extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);

#define cmpxchg64(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 4))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg64((ptr),		\
				(unsigned long long)(o),		\
				(unsigned long long)(n));		\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),	\
				(unsigned long long)(o),		\
				(unsigned long long)(n));		\
	__ret;								\
})
#define cmpxchg64_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 4))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr),	\
				(unsigned long long)(o),		\
				(unsigned long long)(n));		\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),	\
				(unsigned long long)(o),		\
				(unsigned long long)(n));		\
	__ret;								\
})

#endif

#endif