/*
 * x86: fix breakage of vSMP irq operations
 * (include/asm-x86/irqflags.h, from sfrench/cifs-2.6.git)
 */
1 #ifndef _X86_IRQFLAGS_H_
2 #define _X86_IRQFLAGS_H_
3
4 #include <asm/processor-flags.h>
5
6 #ifndef __ASSEMBLY__
7 /*
8  * Interrupt control:
9  */
10
/*
 * Read the current EFLAGS register by pushing it onto the stack and
 * popping it into a C variable (pushf/pop). Returns the raw flags word.
 */
static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=g" lets the compiler choose a register or memory slot for the
	 * pop; the "memory" clobber prevents caching memory values across
	 * the flags read.
	 */
	__asm__ __volatile__(
		"# __raw_save_flags\n\t"
		"pushf ; pop %0"
		: "=g" (flags)
		: /* no input */
		: "memory"
	);

	return flags;
}
25
/*
 * Write @flags into the EFLAGS register via push/popf, restoring a
 * value previously obtained from native_save_fl().
 */
static inline void native_restore_fl(unsigned long flags)
{
	/* popf rewrites the condition codes too, hence the "cc" clobber. */
	__asm__ __volatile__(
		"push %0 ; popf"
		: /* no output */
		:"g" (flags)
		:"memory", "cc"
	);
}
35
/* Disable hardware interrupts on this CPU (cli). */
static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
40
/* Enable hardware interrupts on this CPU (sti). */
static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
45
/*
 * Enable interrupts and halt atomically: sti takes effect after the
 * following instruction, so an interrupt cannot slip in between the
 * sti and the hlt.
 */
static inline void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}
50
/* Halt the CPU without touching the interrupt flag. */
static inline void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}
55
56 #endif
57
58 #ifdef CONFIG_PARAVIRT
59 #include <asm/paravirt.h>
60 #else
61 #ifndef __ASSEMBLY__
62
/*
 * Non-paravirt implementation: read EFLAGS directly from the CPU.
 */
static inline unsigned long __raw_local_save_flags(void)
{
	return native_save_fl();
}
67
/*
 * Non-paravirt implementation: write @flags back into EFLAGS.
 */
static inline void raw_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}
72
73 #ifdef CONFIG_X86_VSMP
74
75 /*
76  * Interrupt control for the VSMP architecture:
77  */
78
79 static inline void raw_local_irq_disable(void)
80 {
81         unsigned long flags = __raw_local_save_flags();
82         raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
83 }
84
85 static inline void raw_local_irq_enable(void)
86 {
87         unsigned long flags = __raw_local_save_flags();
88         raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
89 }
90
91 #else
92
/* Non-vSMP: disabling interrupts is a plain cli. */
static inline void raw_local_irq_disable(void)
{
	native_irq_disable();
}
97
/* Non-vSMP: enabling interrupts is a plain sti. */
static inline void raw_local_irq_enable(void)
{
	native_irq_enable();
}
102
103 #endif
104
105 /*
106  * Used in the idle loop; sti takes one instruction cycle
107  * to complete:
108  */
static inline void raw_safe_halt(void)
{
	/* sti; hlt with no interrupt window in between. */
	native_safe_halt();
}
113
114 /*
115  * Used when interrupts are already enabled or to
116  * shutdown the processor:
117  */
static inline void halt(void)
{
	/* Plain hlt; interrupt flag is left as-is. */
	native_halt();
}
122
123 /*
124  * For spinlocks, etc:
125  */
/*
 * Save the current flags and disable interrupts, returning the saved
 * flags for a later raw_local_irq_restore().
 */
static inline unsigned long __raw_local_irq_save(void)
{
	/* Must snapshot flags BEFORE disabling, or IF is always lost. */
	unsigned long flags = __raw_local_save_flags();

	raw_local_irq_disable();

	return flags;
}
134 #else
135
/* Non-paravirt irq primitives for assembly (.S) code. */
#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli

#ifdef CONFIG_X86_64
#define INTERRUPT_RETURN        iretq
/*
 * Syscall return path: restore the user stack pointer from the PDA,
 * switch back to the user GS base, then sysretq.
 */
#define ENABLE_INTERRUPTS_SYSCALL_RET                   \
                        movq    %gs:pda_oldrsp, %rsp;   \
                        swapgs;                         \
                        sysretq;
#else
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSCALL_RET   sti; sysexit
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif
150
151
152 #endif /* __ASSEMBLY__ */
153 #endif /* CONFIG_PARAVIRT */
154
155 #ifndef __ASSEMBLY__
/*
 * Statement-like wrappers that assign into the caller's @flags;
 * do { } while (0) keeps them safe inside if/else bodies.
 */
#define raw_local_save_flags(flags) \
                do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags) \
                do { (flags) = __raw_local_irq_save(); } while (0)
161
162 #ifdef CONFIG_X86_VSMP
163 static inline int raw_irqs_disabled_flags(unsigned long flags)
164 {
165         return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
166 }
167 #else
168 static inline int raw_irqs_disabled_flags(unsigned long flags)
169 {
170         return !(flags & X86_EFLAGS_IF);
171 }
172 #endif
173
/* Are interrupts disabled on this CPU right now? */
static inline int raw_irqs_disabled(void)
{
	unsigned long flags = __raw_local_save_flags();

	return raw_irqs_disabled_flags(flags);
}
180
181 /*
182  * makes the traced hardirq state match with the machine state
183  *
184  * should be a rarely used function, only in places where its
185  * otherwise impossible to know the irq state, like in traps.
186  */
/*
 * Sync the lockdep-traced hardirq state with the state encoded in
 * @flags: report "off" when the flags say disabled, "on" otherwise.
 */
static inline void trace_hardirqs_fixup_flags(unsigned long flags)
{
	if (!raw_irqs_disabled_flags(flags))
		trace_hardirqs_on();
	else
		trace_hardirqs_off();
}
194
/* Sync the traced hardirq state with the CPU's actual EFLAGS. */
static inline void trace_hardirqs_fixup(void)
{
	unsigned long flags = __raw_local_save_flags();

	trace_hardirqs_fixup_flags(flags);
}
201
202 #else
203
204 #ifdef CONFIG_X86_64
205 /*
206  * Currently paravirt can't handle swapgs nicely when we
207  * don't have a stack we can rely on (such as a user space
208  * stack).  So we either find a way around these or just fault
209  * and emulate if a guest tries to call swapgs directly.
210  *
211  * Either way, this is a good way to document that we don't
212  * have a reliable stack. x86_64 only.
213  */
#define SWAPGS_UNSAFE_STACK     swapgs
/* Assembly-side hooks for irq-flags tracing and lockdep (x86_64). */
#define ARCH_TRACE_IRQS_ON              call trace_hardirqs_on_thunk
#define ARCH_TRACE_IRQS_OFF             call trace_hardirqs_off_thunk
#define ARCH_LOCKDEP_SYS_EXIT           call lockdep_sys_exit_thunk
/*
 * lockdep_sys_exit from an irqs-off path: enable interrupts around
 * the call and save/restore the extra registers it may clobber.
 */
#define ARCH_LOCKDEP_SYS_EXIT_IRQ       \
        TRACE_IRQS_ON; \
        sti; \
        SAVE_REST; \
        LOCKDEP_SYS_EXIT; \
        RESTORE_REST; \
        cli; \
        TRACE_IRQS_OFF;
226
227 #else
/*
 * 32-bit assembly-side tracing hooks: the C helpers may clobber the
 * caller-saved registers, so preserve eax/ecx/edx around each call.
 */
#define ARCH_TRACE_IRQS_ON                      \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call trace_hardirqs_on;                 \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#define ARCH_TRACE_IRQS_OFF                     \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call trace_hardirqs_off;                \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT                   \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call lockdep_sys_exit;                  \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

/* No irqs-off variant needed on 32-bit. */
#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif
256 #endif
257
/* Expand the tracing hooks only when irq-flags tracing is enabled. */
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON         ARCH_TRACE_IRQS_ON
#  define TRACE_IRQS_OFF        ARCH_TRACE_IRQS_OFF
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
/* Expand the lockdep exit hooks only when lock debugging is enabled. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  define LOCKDEP_SYS_EXIT      ARCH_LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ  ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
# endif
272
273 #endif /* __ASSEMBLY__ */
274 #endif