x86: provide paravirtualized hook for rdtscp
include/asm-x86/paravirt.h
#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/page.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0x0
#define CLBR_EAX 0x1
#define CLBR_ECX 0x2
#define CLBR_EDX 0x4
#define CLBR_ANY 0x7

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;

/* general info */
struct pv_info {
        unsigned int kernel_rpl;
        int shared_kernel_pmd;
        int paravirt_enabled;
        const char *name;
};

struct pv_init_ops {
        /*
         * Patch may replace one of the defined code sequences with
         * arbitrary code, subject to the same register constraints.
         * This generally means the code is not free to clobber any
         * registers other than EAX.  The patch function should return
         * the number of bytes of code generated, as we nop pad the
         * rest in generic code.
         */
        unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                          unsigned long addr, unsigned len);

        /* Basic arch-specific setup */
        void (*arch_setup)(void);
        char *(*memory_setup)(void);
        void (*post_allocator_init)(void);

        /* Print a banner to identify the environment */
        void (*banner)(void);
};
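
/*
 * Illustrative sketch (not a real backend; my_patch is a made-up
 * name): a minimal patch hook can simply defer to the generic
 * default patcher declared further down in this header:
 *
 *	static unsigned my_patch(u8 type, u16 clobbers, void *ibuf,
 *				 unsigned long addr, unsigned len)
 *	{
 *		return paravirt_patch_default(type, clobbers, ibuf,
 *					      addr, len);
 *	}
 */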

struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
};

struct pv_time_ops {
        void (*time_init)(void);

        /* Get and set the time of day */
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long);

        unsigned long long (*sched_clock)(void);
        unsigned long (*get_cpu_khz)(void);
};

struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);

        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct desc_ptr *);
        void (*load_idt)(const struct desc_ptr *);
        void (*store_gdt)(struct desc_ptr *);
        void (*store_idt)(struct desc_ptr *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
        void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
                                const void *desc);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, const void *desc, int size);
        void (*write_idt_entry)(gate_desc *,
                                int entrynum, const gate_desc *gate);
        void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

        void (*set_iopl_mask)(unsigned mask);

        void (*wbinvd)(void);
        void (*io_delay)(void);

        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        /* MSR, PMC and TSC operations.
           read_msr writes 0 or -EFAULT to *err; write_msr returns 0 or -EFAULT. */
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);
        unsigned long long (*read_tscp)(unsigned int *aux);

        /* These two are jmp to, not actually called. */
        void (*irq_enable_syscall_ret)(void);
        void (*iret)(void);

        struct pv_lazy_ops lazy_mode;
};

struct pv_irq_ops {
        void (*init_IRQ)(void);

        /*
         * Get/set interrupt state.  save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         */
        unsigned long (*save_fl)(void);
        void (*restore_fl)(unsigned long);
        void (*irq_disable)(void);
        void (*irq_enable)(void);
        void (*safe_halt)(void);
        void (*halt)(void);
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Direct APIC operations, principally for VMI.  Ideally
         * these shouldn't be in this interface.
         */
        void (*apic_write)(unsigned long reg, u32 v);
        void (*apic_write_atomic)(unsigned long reg, u32 v);
        u32 (*apic_read)(unsigned long reg);
        void (*setup_boot_clock)(void);
        void (*setup_secondary_clock)(void);

        void (*startup_ipi_hook)(int phys_apicid,
                                 unsigned long start_eip,
                                 unsigned long start_esp);
#endif
};

struct pv_mmu_ops {
        /*
         * Called before/after init_mm pagetable setup. setup_start
         * may reset %cr3, and may pre-install parts of the pagetable;
         * pagetable setup is expected to preserve any existing
         * mapping.
         */
        void (*pagetable_setup_start)(pgd_t *pgd_base);
        void (*pagetable_setup_done)(pgd_t *pgd_base);

        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        /*
         * Hooks for intercepting the creation/use/destruction of an
         * mm_struct.
         */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);


        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
        void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
                                 unsigned long va);

        /* Hooks for allocating/releasing pagetable pages */
        void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
        void (*alloc_pd)(u32 pfn);
        void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
        void (*release_pt)(u32 pfn);
        void (*release_pd)(u32 pfn);

        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);

#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte);
        void (*set_pud)(pud_t *pudp, pud_t pudval);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);

        unsigned long long (*pte_val)(pte_t);
        unsigned long long (*pmd_val)(pmd_t);
        unsigned long long (*pgd_val)(pgd_t);

        pte_t (*make_pte)(unsigned long long pte);
        pmd_t (*make_pmd)(unsigned long long pmd);
        pgd_t (*make_pgd)(unsigned long long pgd);
#else
        unsigned long (*pte_val)(pte_t);
        unsigned long (*pgd_val)(pgd_t);

        pte_t (*make_pte)(unsigned long pte);
        pgd_t (*make_pgd)(unsigned long pgd);
#endif

#ifdef CONFIG_HIGHPTE
        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

        struct pv_lazy_ops lazy_mode;
};

/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template
{
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
        struct pv_cpu_ops pv_cpu_ops;
        struct pv_irq_ops pv_irq_ops;
        struct pv_apic_ops pv_apic_ops;
        struct pv_mmu_ops pv_mmu_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;

#define PARAVIRT_PATCH(x)                                       \
        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
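
/*
 * Every member of paravirt_patch_template is a function pointer, so
 * the division by sizeof(void *) turns a byte offset into a slot
 * index.  For example, PARAVIRT_PATCH(pv_mmu_ops.read_cr2) yields the
 * index of the read_cr2 slot counted from the start of the template,
 * and the patcher can convert that number back into the slot address.
 */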

#define paravirt_type(op)                               \
        [paravirt_typenum] "i" (PARAVIRT_PATCH(op)),    \
        [paravirt_opptr] "m" (op)
#define paravirt_clobber(clobber)               \
        [paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)       \
        "771:\n\t" insn_string "\n" "772:\n"            \
        ".pushsection .parainstructions,\"a\"\n"        \
        "  .long 771b\n"                                \
        "  .byte " type "\n"                            \
        "  .byte 772b-771b\n"                           \
        "  .short " clobber "\n"                        \
        ".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)                                       \
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
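
/*
 * For each use, the instruction bytes land in .text between the local
 * labels 771 and 772, and a record matching struct paravirt_patch_site
 * (defined below) is appended to .parainstructions:
 *
 *	.long 771b		address of the call site
 *	.byte type		PARAVIRT_PATCH() slot number
 *	.byte 772b-771b		length of the site in bytes
 *	.short clobber		CLBR_* mask the patcher may use
 */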

unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL   "call *%[paravirt_opptr];"
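
/*
 * With the operands from paravirt_type(), this expands to roughly
 * (illustrative):
 *
 *	call *pv_mmu_ops+<read_cr2 byte offset>
 *
 * i.e. an indirect call through whatever currently sits in that slot;
 * the patcher may later rewrite the site into a direct call or inline
 * the native instructions.
 */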

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * x86_64, on the other hand, already specifies a register-based
 * calling convention, returning in %rax, with parameters going in
 * %rdi, %rsi, %rdx, and %rcx.  For this reason, x86_64 does not need
 * any special handling for a fourth argument, unlike i386.  However,
 * x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (%r8 - %r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  On x86_64 the return value is always in %rax, regardless
 * of its size.
 *
 * i386 passes 64-bit arguments as a pair of adjacent 32-bit arguments,
 * in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
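
/*
 * A rough sketch (illustrative only) of what PVOP_VCALL1 compiles to
 * under the i386 rules above, for e.g. write_cr3(x):
 *
 *	movl  x, %eax			arg1 per regparm(3)
 *	771:  call *pv_mmu_ops+<write_cr3 slot>
 *	772:				%eax/%edx/%ecx clobbered
 *
 * plus the .parainstructions record from _paravirt_alt().  On x86_64
 * arg1 would go in %rdi and %r8-%r11 join the clobber list.
 */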
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS                 unsigned long __eax, __edx, __ecx
#define PVOP_CALL_ARGS                  PVOP_VCALL_ARGS
#define PVOP_VCALL_CLOBBERS             "=a" (__eax), "=d" (__edx),     \
                                        "=c" (__ecx)
#define PVOP_CALL_CLOBBERS              PVOP_VCALL_CLOBBERS
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS         unsigned long __edi, __esi, __edx, __ecx
#define PVOP_CALL_ARGS          PVOP_VCALL_ARGS, __eax
#define PVOP_VCALL_CLOBBERS     "=D" (__edi),                           \
                                "=S" (__esi), "=d" (__edx),             \
                                "=c" (__ecx)

#define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS, "=a" (__eax)

#define EXTRA_CLOBBERS   , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS  , "rax", "r8", "r9", "r10", "r11"
#endif

#define __PVOP_CALL(rettype, op, pre, post, ...)                        \
        ({                                                              \
                rettype __ret;                                          \
                PVOP_CALL_ARGS;                                         \
                /* This is 32-bit specific, but is okay in 64-bit */    \
                /* since this condition will never hold */              \
                if (sizeof(rettype) > sizeof(unsigned long)) {          \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : PVOP_CALL_CLOBBERS               \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(CLBR_ANY),      \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" EXTRA_CLOBBERS);  \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : PVOP_CALL_CLOBBERS               \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(CLBR_ANY),      \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" EXTRA_CLOBBERS);  \
                        __ret = (rettype)__eax;                         \
                }                                                       \
                __ret;                                                  \
        })
#define __PVOP_VCALL(op, pre, post, ...)                                \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
                             : PVOP_VCALL_CLOBBERS                      \
                             : paravirt_type(op),                       \
                               paravirt_clobber(CLBR_ANY),              \
                               ##__VA_ARGS__                            \
                             : "memory", "cc" VEXTRA_CLOBBERS);         \
        })

#define PVOP_CALL0(rettype, op)                                         \
        __PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)                                                 \
        __PVOP_VCALL(op, "", "")

#define PVOP_CALL1(rettype, op, arg1)                                   \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
#define PVOP_VCALL1(op, arg1)                                           \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))

#define PVOP_CALL2(rettype, op, arg1, arg2)                             \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
        "1" ((unsigned long)(arg2)))
#define PVOP_VCALL2(op, arg1, arg2)                                     \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
        "1" ((unsigned long)(arg2)))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                       \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
#define PVOP_VCALL3(op, arg1, arg2, arg3)                               \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))

/* i386 needs stack handling for a 4th argument; x86_64 passes it in a
 * register, so this is the only place the two differ. */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op,                                        \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op,                                                \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
        "3"((unsigned long)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
        "3"((unsigned long)(arg4)))
#endif
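
/*
 * Sketch of the i386 case above (illustrative): the fourth argument
 * does not fit regparm(3), so it is pushed immediately before the
 * patchable call and the stack is readjusted afterwards:
 *
 *	push %[_arg4]; 771: call *op; 772: lea 4(%esp),%esp
 */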

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                             struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP                      pv_init_ops.arch_setup();
static inline unsigned long get_wallclock(void)
{
        return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
        return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
        return pv_time_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr,val1,val2) do {               \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while(0)

#define wrmsr(msr,val1,val2) do {               \
        paravirt_write_msr(msr, val1, val2);    \
} while(0)

#define rdmsrl(msr,val) do {                    \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
} while(0)

#define wrmsrl(msr,val)         wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr,a,b)     paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr,a,b) ({                  \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
        _err; })
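
/*
 * Usage sketch (illustrative; any MSR number works the same way):
 * unlike rdmsr(), rdmsr_safe() evaluates to the fault status instead
 * of ignoring it, so callers can probe MSRs that may not exist:
 *
 *	u32 lo, hi;
 *
 *	if (rdmsr_safe(msr, &lo, &hi))
 *		... the read faulted; lo/hi are not valid ...
 */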

static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low) do {                        \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
} while(0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calculate_cpu_khz() (pv_time_ops.get_cpu_khz())

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter,low,high) do {            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while(0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        unsigned int __aux;                             \
        unsigned long long __val = paravirt_rdtscp(&__aux); \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned int __aux;                             \
        (val) = paravirt_rdtscp(&__aux);                \
        (aux) = __aux;                                  \
} while (0)
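
/*
 * Usage sketch (illustrative): callers that want the whole 64-bit
 * counter use rdtscpll(), callers that want the split halves use
 * rdtscp(); both also return the TSC_AUX value, which native hardware
 * loads from the TSC_AUX MSR and a hypervisor may emulate:
 *
 *	unsigned int cpu;
 *	u32 lo, hi;
 *	unsigned long long tsc;
 *
 *	rdtscp(lo, hi, cpu);
 *	rdtscpll(tsc, cpu);
 */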

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, u32 v)
{
        PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
}

static inline void apic_write_atomic(unsigned long reg, u32 v)
{
        PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v);
}

static inline u32 apic_read(unsigned long reg)
{
        return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
}

static inline void setup_boot_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif

static inline void paravirt_post_allocator_init(void)
{
        if (pv_init_ops.post_allocator_init)
                (*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
}

static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn);
}
static inline void paravirt_release_pt(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
}

static inline void paravirt_alloc_pd(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.alloc_pd, pfn);
}

static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
                                           unsigned start, unsigned count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pd(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pd, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

#ifdef CONFIG_X86_PAE
static inline pte_t __pte(unsigned long long val)
{
        unsigned long long ret = PVOP_CALL2(unsigned long long,
                                            pv_mmu_ops.make_pte,
                                            val, val >> 32);
        return (pte_t) { ret, ret >> 32 };
}

static inline pmd_t __pmd(unsigned long long val)
{
        return (pmd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pmd,
                                    val, val >> 32) };
}

static inline pgd_t __pgd(unsigned long long val)
{
        return (pgd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pgd,
                                    val, val >> 32) };
}

static inline unsigned long long pte_val(pte_t x)
{
        return PVOP_CALL2(unsigned long long, pv_mmu_ops.pte_val,
                          x.pte_low, x.pte_high);
}

static inline unsigned long long pmd_val(pmd_t x)
{
        return PVOP_CALL2(unsigned long long, pv_mmu_ops.pmd_val,
                          x.pmd, x.pmd >> 32);
}

static inline unsigned long long pgd_val(pgd_t x)
{
        return PVOP_CALL2(unsigned long long, pv_mmu_ops.pgd_val,
                          x.pgd, x.pgd >> 32);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, pteval.pte_low, pteval.pte_high);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        /* 5 arg words */
        pv_mmu_ops.set_pte_at(mm, addr, ptep, pteval);
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pteval.pte_low, pteval.pte_high);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        /* 5 arg words */
        pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp,
                    pmdval.pmd, pmdval.pmd >> 32);
}

static inline void set_pud(pud_t *pudp, pud_t pudval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                    pudval.pgd.pgd, pudval.pgd.pgd >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}

#else  /* !CONFIG_X86_PAE */

static inline pte_t __pte(unsigned long val)
{
        return (pte_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pte, val) };
}

static inline pgd_t __pgd(unsigned long val)
{
        return (pgd_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pgd, val) };
}

static inline unsigned long pte_val(pte_t x)
{
        return PVOP_CALL1(unsigned long, pv_mmu_ops.pte_val, x.pte_low);
}

static inline unsigned long pgd_val(pgd_t x)
{
        return PVOP_CALL1(unsigned long, pv_mmu_ops.pgd_val, x.pgd);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte_low);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte_low);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd);
}
#endif  /* CONFIG_X86_PAE */

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,
        PARAVIRT_LAZY_MMU,
        PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);

#define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
                arch_leave_lazy_cpu_mode();
                arch_enter_lazy_cpu_mode();
        }
}


#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }
}

void _paravirt_nop(void);
#define paravirt_nop    ((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;              /* original instructions */
        u8 instrtype;           /* type of this instruction */
        u8 len;                 /* length of original instruction */
        u16 clobbers;           /* what registers you may clobber */
};
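
/*
 * Sketch of how these records are consumed by apply_paravirt()
 * (mentioned above; loop body abbreviated for illustration):
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++) {
 *		used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *					 insnbuf, (unsigned long)p->instr,
 *					 p->len);
 *		... nop-pad from used up to p->len, copy back ...
 *	}
 */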

extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];

static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     : "=a"(f)
                     : "0"(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
}

static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt("pushl %%ecx; pushl %%edx;"
                                  PARAVIRT_CALL
                                  "popl %%edx; popl %%ecx")
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */

#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)

#define PARA_SITE(ptype, clobbers, ops)         \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .long 771b;                            \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection
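
/*
 * Illustrative expansion: INTERRUPT_RETURN below becomes
 *
 *	771:	jmp *%cs:pv_cpu_ops+PV_CPU_iret
 *	772:	(record 771b/type/len/clobbers in .parainstructions)
 *
 * i.e. the assembler-side twin of the C _paravirt_alt() macro.
 */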

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp *%cs:pv_cpu_ops+PV_CPU_iret)

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  pushl %eax; pushl %ecx; pushl %edx;                   \
                  call *%cs:pv_irq_ops+PV_IRQ_irq_disable;              \
                  popl %edx; popl %ecx; popl %eax)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  pushl %eax; pushl %ecx; pushl %edx;                   \
                  call *%cs:pv_irq_ops+PV_IRQ_irq_enable;               \
                  popl %edx; popl %ecx; popl %eax)

#define ENABLE_INTERRUPTS_SYSCALL_RET                                   \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret),\
                  CLBR_NONE,                                            \
                  jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret)

#define GET_CR0_INTO_EAX                        \
        push %ecx; push %edx;                   \
        call *pv_cpu_ops+PV_CPU_read_cr0;       \
        pop %edx; pop %ecx

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif  /* __ASM_PARAVIRT_H */