x86: unify paravirt parts of system.h
[sfrench/cifs-2.6.git] / include/asm-x86/system_64.h
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <asm/segment.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

#define __SAVE(reg, offset)	"movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset)	"movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
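
/*
 * For reference, these expand to plain quadword moves relative to the
 * stack pointer; e.g. __SAVE(rbx, 3) becomes:
 *
 *	movq %rbx,(14-3)*8(%rsp)
 *
 * i.e. a store 88 bytes above the current %rsp.
 */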

/* the frame pointer must be saved last, for get_wchan() to find it */
#define SAVE_CONTEXT	"pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT	"movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"

/* Save and restore flags across the switch so a leaking NT flag is handled */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					      \
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */   \
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */\
		     "call __switch_to\n\t"				      \
		     ".globl thread_return\n"				      \
		     "thread_return:\n\t"				      \
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"		      \
		     "movq %P[thread_info](%%rsi),%%r8\n\t"		      \
		     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"     \
		     "movq %%rax,%%rdi\n\t"				      \
		     "jc ret_from_fork\n\t"				      \
		     RESTORE_CONTEXT					      \
		     : "=a" (last)					      \
		     : [next] "S" (next), [prev] "D" (prev),		      \
		       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)),  \
		       [tif_fork] "i" (TIF_FORK),			      \
		       [thread_info] "i" (offsetof(struct task_struct, stack)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		     : "memory", "cc" __EXTRA_CLOBBER)
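
/*
 * Illustrative only: the scheduler is effectively the sole caller,
 * as in kernel/sched.c's context_switch():
 *
 *	switch_to(prev, next, prev);
 *
 * 'prev' is passed as 'last' as well, so that when this task is
 * eventually switched back in it learns, via %rax out of __switch_to(),
 * which task it was switched in from.
 */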

#endif	/* __KERNEL__ */

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while (0)
#endif
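
/*
 * A minimal sketch of how these pair up across CPUs ('data' and 'flag'
 * are hypothetical variables, not part of this header):
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	data = 42;			while (!flag)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					BUG_ON(data != 42);
 *
 * smp_wmb() orders the data store before the flag store; the paired
 * smp_rmb() orders the flag load before the data load.
 */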

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")

#define read_barrier_depends()	do {} while (0)
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
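
/*
 * xchg is implicitly locked on x86, so set_mb() is a store followed by
 * a full barrier with no explicit fence; set_current_state() in
 * <linux/sched.h> uses it this way:
 *
 *	set_mb(current->state, TASK_INTERRUPTIBLE);
 */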

#define warn_if_not_ulong(x) do { unsigned long foo; (void)(&(x) == &foo); } while (0)
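
/*
 * The address comparison above is optimized away; its only effect is a
 * "comparison of distinct pointer types" warning at compile time when
 * x is not an unsigned long, e.g. (hypothetically):
 *
 *	int i;
 *	warn_if_not_ulong(i);	<-- warns
 */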

static inline unsigned long read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
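
/*
 * A minimal (hypothetical) sketch of raising and restoring the task
 * priority in %cr8, which mirrors the local APIC TPR:
 *
 *	unsigned long tpr = read_cr8();
 *	write_cr8(15);			mask all external interrupts
 *	...critical section...
 *	write_cr8(tpr);			restore the previous priority
 */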

#include <linux/irqflags.h>

#endif	/* __ASM_SYSTEM_H */