#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0x0
#define CLBR_EAX 0x1
#define CLBR_ECX 0x2
#define CLBR_EDX 0x4
#define CLBR_ANY 0x7

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>
/* general info */
struct pv_info {
        unsigned int kernel_rpl;
        int shared_kernel_pmd;
        int paravirt_enabled;
};

struct pv_init_ops {
        /*
         * Patch may replace one of the defined code sequences with
         * arbitrary code, subject to the same register constraints.
         * This generally means the code is not free to clobber any
         * registers other than EAX.  The patch function should return
         * the number of bytes of code generated, as we nop pad the
         * rest in generic code.  (An example sketch follows this
         * struct.)
         */
        unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                          unsigned long addr, unsigned len);

        /* Basic arch-specific setup */
        void (*arch_setup)(void);
        char *(*memory_setup)(void);
        void (*post_allocator_init)(void);

        /* Print a banner to identify the environment */
        void (*banner)(void);
};
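#if 0
/*
 * Illustrative sketch only, not part of this header: a minimal backend
 * patch hook.  A backend with no special inline sequences can simply
 * defer to paravirt_patch_default() (declared later in this header),
 * which turns the indirect call into a direct call, or nops it out.
 */
static unsigned example_patch(u8 type, u16 clobbers, void *insnbuf,
                              unsigned long addr, unsigned len)
{
        return paravirt_patch_default(type, clobbers, insnbuf, addr, len);
}
#endif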
struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
};

struct pv_time_ops {
        void (*time_init)(void);

        /* Get and set time of day */
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long);

        unsigned long long (*sched_clock)(void);
        unsigned long (*get_cpu_khz)(void);
};
struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);

        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct desc_ptr *);
        void (*load_idt)(const struct desc_ptr *);
        void (*store_gdt)(struct desc_ptr *);
        void (*store_idt)(struct desc_ptr *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
        void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
                                const void *desc);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, const void *desc, int size);
        void (*write_idt_entry)(gate_desc *,
                                int entrynum, const gate_desc *gate);
        void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

        void (*set_iopl_mask)(unsigned mask);

        void (*wbinvd)(void);
        void (*io_delay)(void);

        /* cpuid emulation, mostly so that caps bits can be disabled
         * (an example sketch follows this struct) */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        /* MSR, PMC and TSC operations.
         * read_msr sets *err to 0/-EFAULT; write_msr returns
         * 0/-EFAULT. */
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);
        unsigned long long (*read_tscp)(unsigned int *aux);

        /* These two are jmp to, not actually called. */
        void (*irq_enable_syscall_ret)(void);
        void (*iret)(void);

        void (*swapgs)(void);

        struct pv_lazy_ops lazy_mode;
};
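#if 0
/*
 * Illustrative sketch only, not part of this header: a backend cpuid
 * hook that hides a capability bit from the kernel.  Assumes the
 * native_cpuid() helper from <asm/processor.h>.
 */
static void example_cpuid(unsigned int *eax, unsigned int *ebx,
                          unsigned int *ecx, unsigned int *edx)
{
        unsigned int leaf = *eax;

        native_cpuid(eax, ebx, ecx, edx);
        if (leaf == 1)
                *edx &= ~(1 << 9);      /* pretend there is no local APIC */
}
#endif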
struct pv_irq_ops {
        void (*init_IRQ)(void);

        /*
         * Get/set interrupt state.  save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         */
        unsigned long (*save_fl)(void);
        void (*restore_fl)(unsigned long);
        void (*irq_disable)(void);
        void (*irq_enable)(void);
        void (*safe_halt)(void);
        void (*halt)(void);
};
struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Direct APIC operations, principally for VMI.  Ideally
         * these shouldn't be in this interface.
         */
        void (*apic_write)(unsigned long reg, u32 v);
        void (*apic_write_atomic)(unsigned long reg, u32 v);
        u32 (*apic_read)(unsigned long reg);
        void (*setup_boot_clock)(void);
        void (*setup_secondary_clock)(void);

        void (*startup_ipi_hook)(int phys_apicid,
                                 unsigned long start_eip,
                                 unsigned long start_esp);
#endif
};
struct pv_mmu_ops {
        /*
         * Called before/after init_mm pagetable setup. setup_start
         * may reset %cr3, and may pre-install parts of the pagetable;
         * pagetable setup is expected to preserve any existing
         * mapping.
         */
        void (*pagetable_setup_start)(pgd_t *pgd_base);
        void (*pagetable_setup_done)(pgd_t *pgd_base);

        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        /*
         * Hooks for intercepting the creation/use/destruction of an
         * mm_struct.
         */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);

        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
        void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
                                 unsigned long va);

        /* Hooks for allocating/releasing pagetable pages
         * (an example sketch follows this struct) */
        void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
        void (*alloc_pd)(u32 pfn);
        void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
        void (*release_pt)(u32 pfn);
        void (*release_pd)(u32 pfn);

        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);

#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte);
        void (*set_pud)(pud_t *pudp, pud_t pudval);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);

        unsigned long long (*pte_val)(pte_t);
        unsigned long long (*pmd_val)(pmd_t);
        unsigned long long (*pgd_val)(pgd_t);

        pte_t (*make_pte)(unsigned long long pte);
        pmd_t (*make_pmd)(unsigned long long pmd);
        pgd_t (*make_pgd)(unsigned long long pgd);
#else /* !CONFIG_X86_PAE */
        unsigned long (*pte_val)(pte_t);
        unsigned long (*pgd_val)(pgd_t);

        pte_t (*make_pte)(unsigned long pte);
        pgd_t (*make_pgd)(unsigned long pgd);
#endif

#ifdef CONFIG_HIGHPTE
        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

        struct pv_lazy_ops lazy_mode;
};
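#if 0
/*
 * Illustrative sketch only, not part of this header: a backend using
 * the pagetable allocation hooks.  A Xen-style hypervisor must see a
 * newly allocated pagetable page read-only before it is hooked into
 * the pagetables; example_make_readonly() is a hypothetical helper.
 */
static void example_alloc_pt(struct mm_struct *mm, u32 pfn)
{
        example_make_readonly(pfn);
}
#endif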
/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template
{
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
        struct pv_cpu_ops pv_cpu_ops;
        struct pv_irq_ops pv_irq_ops;
        struct pv_apic_ops pv_apic_ops;
        struct pv_mmu_ops pv_mmu_ops;
};
extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
#define PARAVIRT_PATCH(x)                                       \
        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)                               \
        [paravirt_typenum] "i" (PARAVIRT_PATCH(op)),    \
        [paravirt_opptr] "m" (op)
#define paravirt_clobber(clobber)               \
        [paravirt_clobber] "i" (clobber)
/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)       \
        "771:\n\t" insn_string "\n" "772:\n"            \
        ".pushsection .parainstructions,\"a\"\n"        \
        "  .align 4\n"                                  \
        "  .long 771b\n"                                \
        "  .byte " type "\n"                            \
        "  .byte 772b-771b\n"                           \
        "  .short " clobber "\n"                        \
        ".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)                                       \
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end);

int paravirt_disable_iospace(void);
/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore
 * be freely converted back into a structure offset.
 */
#define PARAVIRT_CALL   "call *%[paravirt_opptr];"
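#if 0
/*
 * Illustrative sketch only, not part of this header: roughly how the
 * patching code maps a type number back to the current op pointer.
 * The real helper lives in arch/x86/kernel/paravirt.c.
 */
static void *example_get_call_destination(u8 type)
{
        struct paravirt_patch_template tmpl = {
                .pv_init_ops = pv_init_ops,
                .pv_time_ops = pv_time_ops,
                .pv_cpu_ops = pv_cpu_ops,
                .pv_irq_ops = pv_irq_ops,
                .pv_apic_ops = pv_apic_ops,
                .pv_mmu_ops = pv_mmu_ops,
        };

        return *((void **)&tmpl + type);
}
#endif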
/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * x86_64, on the other hand, already specifies a register-based
 * calling convention, returning in %rax, with parameters passed in
 * %rdi, %rsi, %rdx, and %rcx.  Note that for this reason, x86_64 does
 * not need any special handling for dealing with 4 arguments, unlike
 * i386.  However, x86_64 also has to clobber all caller-saved
 * registers, of which there are quite a few (%r8-%r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
 * i386 passes them in low,high order, while on x86_64 a 64-bit
 * argument simply occupies a single register.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 * (An expansion example follows the macro definitions below.)
 */
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS                 unsigned long __eax, __edx, __ecx
#define PVOP_CALL_ARGS                  PVOP_VCALL_ARGS
#define PVOP_VCALL_CLOBBERS             "=a" (__eax), "=d" (__edx),     \
                                        "=c" (__ecx)
#define PVOP_CALL_CLOBBERS              PVOP_VCALL_CLOBBERS
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else
#define PVOP_VCALL_ARGS         unsigned long __edi, __esi, __edx, __ecx
#define PVOP_CALL_ARGS          PVOP_VCALL_ARGS, __eax
#define PVOP_VCALL_CLOBBERS     "=D" (__edi),                           \
                                "=S" (__esi), "=d" (__edx),             \
                                "=c" (__ecx)
#define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS, "=a" (__eax)
#define EXTRA_CLOBBERS   , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS  , "rax", "r8", "r9", "r10", "r11"
#endif
#define __PVOP_CALL(rettype, op, pre, post, ...)                        \
        ({                                                              \
                rettype __ret;                                          \
                PVOP_CALL_ARGS;                                         \
                /* This is 32-bit specific, but is okay in 64-bit */    \
                /* since this condition will never hold */              \
                if (sizeof(rettype) > sizeof(unsigned long)) {          \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : PVOP_CALL_CLOBBERS               \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(CLBR_ANY),      \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" EXTRA_CLOBBERS);  \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : PVOP_CALL_CLOBBERS               \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(CLBR_ANY),      \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" EXTRA_CLOBBERS);  \
                        __ret = (rettype)__eax;                         \
                }                                                       \
                __ret;                                                  \
        })

#define __PVOP_VCALL(op, pre, post, ...)                                \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
                             : PVOP_VCALL_CLOBBERS                      \
                             : paravirt_type(op),                       \
                               paravirt_clobber(CLBR_ANY),              \
                               ##__VA_ARGS__                            \
                             : "memory", "cc" VEXTRA_CLOBBERS);         \
        })
#define PVOP_CALL0(rettype, op)                                         \
        __PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)                                                 \
        __PVOP_VCALL(op, "", "")

#define PVOP_CALL1(rettype, op, arg1)                                   \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
#define PVOP_VCALL1(op, arg1)                                           \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))

#define PVOP_CALL2(rettype, op, arg1, arg2)                             \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
        "1" ((unsigned long)(arg2)))
#define PVOP_VCALL2(op, arg1, arg2)                                     \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
        "1" ((unsigned long)(arg2)))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                       \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
#define PVOP_VCALL3(op, arg1, arg2, arg3)                               \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
/* The 4-argument case is the only place i386 and x86_64 differ: i386
 * must pass the fourth argument on the stack, while x86_64 can keep it
 * much simpler. */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op,                                        \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op,                                                \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
        "3"((unsigned long)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
        "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
        "3"((unsigned long)(arg4)))
#endif
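/*
 * An expansion example (illustrative only): a wrapper such as
 *
 *      static inline unsigned long example_read_cr2(void)
 *      {
 *              return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *      }
 *
 * initially compiles to an indirect call through pv_mmu_ops.read_cr2,
 * with the callsite recorded in .parainstructions; apply_paravirt()
 * may later turn it into a direct call, or inline the native
 * "mov %cr2, %eax" sequence at the site.
 */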
static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP      pv_init_ops.arch_setup();

static inline unsigned long get_wallclock(void)
{
        return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
        return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
        return pv_time_ops.time_init;
}
/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}
static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}
#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr,val1,val2) do {               \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while(0)

#define wrmsr(msr,val1,val2) do {               \
        paravirt_write_msr(msr, val1, val2);    \
} while(0)

#define rdmsrl(msr,val) do {                    \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
} while(0)

#define wrmsrl(msr,val)         wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr,a,b)     paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr,a,b) ({                  \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
        _err;                                   \
})
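#if 0
/*
 * Illustrative sketch only, not part of this header: probing an MSR
 * that the hypervisor may refuse, using the exception-handling
 * variant.  Assumes MSR_IA32_APICBASE from <asm/msr-index.h>.
 */
static int example_read_apicbase(u64 *val)
{
        u32 lo, hi;

        if (rdmsr_safe(MSR_IA32_APICBASE, &lo, &hi))
                return -EFAULT; /* the access faulted and was trapped */
        *val = ((u64)hi << 32) | lo;
        return 0;
}
#endif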
static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low) do {                        \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
} while(0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calculate_cpu_khz() (pv_time_ops.get_cpu_khz())

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter,low,high) do {            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while(0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        unsigned int __aux;                             \
        unsigned long __val = paravirt_rdtscp(&__aux);  \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned int __aux;                             \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)
static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}
/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}
#ifdef CONFIG_X86_LOCAL_APIC
/*
 * Basic functions accessing APICs.
 */
static inline void apic_write(unsigned long reg, u32 v)
{
        PVOP_VCALL2(pv_apic_ops.apic_write, reg, v);
}

static inline void apic_write_atomic(unsigned long reg, u32 v)
{
        PVOP_VCALL2(pv_apic_ops.apic_write_atomic, reg, v);
}

static inline u32 apic_read(unsigned long reg)
{
        return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg);
}

static inline void setup_boot_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif
static inline void paravirt_post_allocator_init(void)
{
        if (pv_init_ops.post_allocator_init)
                (*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
}
static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pt, mm, pfn);
}
static inline void paravirt_release_pt(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pt, pfn);
}

static inline void paravirt_alloc_pd(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.alloc_pd, pfn);
}

static inline void paravirt_alloc_pd_clone(unsigned pfn, unsigned clonepfn,
                                           unsigned start, unsigned count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pd(unsigned pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pd, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif
static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}
#ifdef CONFIG_X86_PAE
static inline pte_t __pte(unsigned long long val)
{
        unsigned long long ret = PVOP_CALL2(unsigned long long,
                                            pv_mmu_ops.make_pte,
                                            val, val >> 32);
        return (pte_t) { ret, ret >> 32 };
}

static inline pmd_t __pmd(unsigned long long val)
{
        return (pmd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pmd,
                                    val, val >> 32) };
}

static inline pgd_t __pgd(unsigned long long val)
{
        return (pgd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pgd,
                                    val, val >> 32) };
}

static inline unsigned long long pte_val(pte_t x)
{
        return PVOP_CALL2(unsigned long long, pv_mmu_ops.pte_val,
                          x.pte_low, x.pte_high);
}

static inline unsigned long long pmd_val(pmd_t x)
{
        return PVOP_CALL2(unsigned long long, pv_mmu_ops.pmd_val,
                          x.pmd, x.pmd >> 32);
}

static inline unsigned long long pgd_val(pgd_t x)
{
        return PVOP_CALL2(unsigned long long, pv_mmu_ops.pgd_val,
                          x.pgd, x.pgd >> 32);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, pteval.pte_low, pteval.pte_high);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        /* 5 arg words */
        pv_mmu_ops.set_pte_at(mm, addr, ptep, pteval);
}

static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pteval.pte_low, pteval.pte_high);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        /* 5 arg words */
        pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp,
                    pmdval.pmd, pmdval.pmd >> 32);
}

static inline void set_pud(pud_t *pudp, pud_t pudval)
{
        PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                    pudval.pgd.pgd, pudval.pgd.pgd >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */

static inline pte_t __pte(unsigned long val)
{
        return (pte_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pte, val) };
}

static inline pgd_t __pgd(unsigned long val)
{
        return (pgd_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pgd, val) };
}

static inline unsigned long pte_val(pte_t x)
{
        return PVOP_CALL1(unsigned long, pv_mmu_ops.pte_val, x.pte_low);
}

static inline unsigned long pgd_val(pgd_t x)
{
        return PVOP_CALL1(unsigned long, pv_mmu_ops.pgd_val, x.pgd);
}

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte_low);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pteval)
{
        PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte_low);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
        PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd);
}
#endif  /* CONFIG_X86_PAE */
/* Lazy mode for batching updates / context switch
 * (a batching sketch follows the helpers below) */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,
        PARAVIRT_LAZY_MMU,
        PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
#define __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
                arch_leave_lazy_cpu_mode();
                arch_enter_lazy_cpu_mode();
        }
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }
}
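#if 0
/*
 * Illustrative sketch only, not part of this header: how generic code
 * batches pagetable updates.  Inside the lazy section a hypervisor
 * backend may queue the set_pte() operations and flush them as a
 * single hypercall when the mode is left.
 */
static void example_copy_ptes(pte_t *dst, const pte_t *src, int n)
{
        int i;

        arch_enter_lazy_mmu_mode();
        for (i = 0; i < n; i++)
                set_pte(&dst[i], src[i]);
        arch_leave_lazy_mmu_mode();
}
#endif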
void _paravirt_nop(void);
#define paravirt_nop    ((void *)_paravirt_nop)
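#if 0
/*
 * Illustrative sketch only, not part of this header: a backend can
 * stub out any hook it has no use for with paravirt_nop; the void *
 * cast in the #define above lets it be assigned to any of the
 * function pointers.
 */
static void example_backend_setup(void)
{
        pv_mmu_ops.pte_update = paravirt_nop;   /* no post-update hook */
}
#endif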
/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;              /* original instructions */
        u8 instrtype;           /* type of this instruction */
        u8 len;                 /* length of original instruction */
        u16 clobbers;           /* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
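#if 0
/*
 * Illustrative sketch only, not part of this header: roughly how the
 * patcher walks the section.  The real apply_paravirt() also nop-pads
 * the remainder of each site before copying the code back.
 */
static void example_apply_paravirt(struct paravirt_patch_site *start,
                                   struct paravirt_patch_site *end)
{
        struct paravirt_patch_site *p;
        char insnbuf[16];

        for (p = start; p < end; p++) {
                unsigned used;

                used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
                                         (unsigned long)p->instr, p->len);
                /* ... nop-pad insnbuf from used to p->len, copy back ... */
        }
}
#endif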
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* We save some registers, but not all of them - that would be too
 * much.  We clobber all caller-saved registers but the argument
 * parameter. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx"
#define PV_FLAGS_ARG "D"
#endif
static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt(PV_SAVE_REGS PARAVIRT_CALL PV_RESTORE_REGS)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc" PV_VEXTRA_CLOBBERS);
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS PARAVIRT_CALL PV_RESTORE_REGS)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS PARAVIRT_CALL PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt(PV_SAVE_REGS PARAVIRT_CALL PV_RESTORE_REGS)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}
/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL

#else  /* __ASSEMBLY__ */
#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection

#ifdef CONFIG_X86_64
#define PV_SAVE_REGS    pushq %rax; pushq %rdi; pushq %rcx; pushq %rdx
#define PV_RESTORE_REGS popq %rdx; popq %rcx; popq %rdi; popq %rax
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#else
#define PV_SAVE_REGS    pushl %eax; pushl %edi; pushl %ecx; pushl %edx
#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#endif
#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,      \
                  jmp *%cs:pv_cpu_ops+PV_CPU_iret)

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS;                                         \
                  call *%cs:pv_irq_ops+PV_IRQ_irq_disable;              \
                  PV_RESTORE_REGS;)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS;                                         \
                  call *%cs:pv_irq_ops+PV_IRQ_irq_enable;               \
                  PV_RESTORE_REGS;)

#define ENABLE_INTERRUPTS_SYSCALL_RET                                   \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret),\
                  CLBR_NONE,                                            \
                  jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret)
#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                        \
        push %ecx; push %edx;                   \
        call *pv_cpu_ops+PV_CPU_read_cr0;       \
        pop %edx; pop %ecx
#else
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,    \
                  PV_SAVE_REGS;                                         \
                  call *pv_cpu_ops+PV_CPU_swapgs;                       \
                  PV_RESTORE_REGS)

#define GET_CR2_INTO_RCX                        \
        call *pv_mmu_ops+PV_MMU_read_cr2;       \
        movq %rax, %rcx;                        \
        xorq %rax, %rax;
#endif
#endif  /* __ASSEMBLY__ */
#endif  /* CONFIG_PARAVIRT */
#endif  /* __ASM_PARAVIRT_H */