#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#ifdef __KERNEL__
#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * but also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev,next,last) do {					\
	unsigned long esi,edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"		/* save EBP */		\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"	/* returns to the pushed EIP */ \
		     "1:\t"						\
		     "popl %%ebp\n\t"		/* restore EBP */	\
		     "popfl"			/* restore flags */	\
		     :"=m" (prev->thread.sp),"=m" (prev->thread.ip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)			\
		     :"m" (next->thread.sp),"m" (next->thread.ip),	\
		      "2" (prev), "d" (next));				\
} while (0)
#endif	/* __KERNEL__ */
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
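
/*
 * A hedged sketch of why a write barrier matters even on UP when talking
 * to devices: the DMA descriptor must be globally visible before the
 * doorbell write that tells the hardware to fetch it. The structure and
 * register names here are made up for illustration.
 */
#if 0	/* example only, not compiled */
struct example_desc { u32 addr, len; };
struct example_ring {
	struct example_desc slot[16];	/* descriptor ring in DMA memory */
	unsigned int tail;
	void __iomem *doorbell;		/* device doorbell register */
};

static void example_post_descriptor(struct example_ring *ring,
				    struct example_desc *desc)
{
	ring->slot[ring->tail] = *desc;		/* fill the descriptor */
	wmb();					/* descriptor before doorbell */
	writel(ring->tail, ring->doorbell);	/* kick the device */
}
#endif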
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/
#define read_barrier_depends()	do { } while(0)
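
/*
 * The first example above, sketched as plain C (illustrative only; all
 * names are made up). The writer publishes an initialized object through
 * a shared pointer; the reader's second load is data-dependent on the
 * first, so read_barrier_depends() is all the reader needs, even on
 * Alpha, where it is not a nop.
 */
#if 0	/* example only, not compiled */
struct example_obj { int val; };
static struct example_obj *shared_ptr;

static void example_writer(struct example_obj *obj)
{
	obj->val = 42;		/* initialize the object... */
	wmb();			/* ...before publishing it */
	shared_ptr = obj;
}

static int example_reader(void)
{
	struct example_obj *q = shared_ptr;	/* read the pointer */
	read_barrier_depends();			/* order the dependent read */
	return q ? q->val : -1;			/* read through it */
}
#endif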
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
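
/*
 * A hedged sketch of the classic flag/data pairing these smp_* barriers
 * exist for (names are illustrative). The producer's smp_wmb() keeps the
 * data store ahead of the flag store; the consumer's smp_rmb() keeps the
 * flag load ahead of the data load. On UP builds both collapse to
 * compiler barriers, which is all that is needed there.
 */
#if 0	/* example only, not compiled */
static int example_data;
static int example_ready;

static void example_producer(void)
{
	example_data = 123;	/* write the payload... */
	smp_wmb();		/* ...before setting the flag */
	example_ready = 1;
}

static int example_consumer(void)
{
	if (!example_ready)
		return -1;
	smp_rmb();		/* flag read before data read */
	return example_data;
}
#endif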
#include <linux/irqflags.h>
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
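
/*
 * HAVE_DISABLE_HLT advertises the disable_hlt()/enable_hlt() pair, which
 * keeps the idle loop from executing hlt while a driver is in an I/O
 * phase that cannot tolerate it. A hedged sketch of the usual pairing;
 * the surrounding driver code is made up:
 */
#if 0	/* example only, not compiled */
static void example_critical_io(void)
{
	disable_hlt();		/* idle loop must not hlt during this */
	/* ... timing-sensitive I/O here ... */
	enable_hlt();		/* safe to hlt again */
}
#endif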