KVM: switch vcpu context to use SRCU
[sfrench/cifs-2.6.git] / arch/x86/kvm/vmx.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  *
9  * Authors:
10  *   Avi Kivity   <avi@qumranet.com>
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  *
16  */
17
18 #include "irq.h"
19 #include "mmu.h"
20
21 #include <linux/kvm_host.h>
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/mm.h>
25 #include <linux/highmem.h>
26 #include <linux/sched.h>
27 #include <linux/moduleparam.h>
28 #include <linux/ftrace_event.h>
29 #include "kvm_cache_regs.h"
30 #include "x86.h"
31
32 #include <asm/io.h>
33 #include <asm/desc.h>
34 #include <asm/vmx.h>
35 #include <asm/virtext.h>
36 #include <asm/mce.h>
37
38 #include "trace.h"
39
40 #define __ex(x) __kvm_handle_fault_on_reboot(x)
41
42 MODULE_AUTHOR("Qumranet");
43 MODULE_LICENSE("GPL");
44
45 static int __read_mostly bypass_guest_pf = 1;
46 module_param(bypass_guest_pf, bool, S_IRUGO);
47
48 static int __read_mostly enable_vpid = 1;
49 module_param_named(vpid, enable_vpid, bool, 0444);
50
51 static int __read_mostly flexpriority_enabled = 1;
52 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
53
54 static int __read_mostly enable_ept = 1;
55 module_param_named(ept, enable_ept, bool, S_IRUGO);
56
57 static int __read_mostly enable_unrestricted_guest = 1;
58 module_param_named(unrestricted_guest,
59                         enable_unrestricted_guest, bool, S_IRUGO);
60
61 static int __read_mostly emulate_invalid_guest_state = 0;
62 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
63
64 #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST                           \
65         (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
66 #define KVM_GUEST_CR0_MASK                                              \
67         (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
68 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST                         \
69         (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
70 #define KVM_VM_CR0_ALWAYS_ON                                            \
71         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
72 #define KVM_CR4_GUEST_OWNED_BITS                                      \
73         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
74          | X86_CR4_OSXMMEXCPT)
75
76 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
77 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
78
79 /*
80  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
81  * ple_gap:    upper bound on the amount of time between two successive
82  *             executions of PAUSE in a loop. Also indicates whether PLE is enabled.
83  *             According to tests, this time is usually smaller than 41 cycles.
84  * ple_window: upper bound on the amount of time a guest is allowed to execute
85  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
86  *             less than 2^12 cycles.
87  * Time is measured based on a counter that runs at the same rate as the TSC,
88  * refer to SDM volume 3B, sections 21.6.13 & 22.1.3.
89  */
90 #define KVM_VMX_DEFAULT_PLE_GAP    41
91 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096
92 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
93 module_param(ple_gap, int, S_IRUGO);
94
95 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
96 module_param(ple_window, int, S_IRUGO);
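/*
 * Editorial sketch, not part of the original file: how the two PLE
 * thresholds above are meant to combine.  The CPU tracks the interval
 * between successive PAUSE executions and the total time spent in the
 * loop; the helper name below is purely illustrative.
 */
static inline int ple_would_exit_example(u64 gap_cycles, u64 loop_cycles)
{
        /* A gap larger than ple_gap means this is not a tight spin loop. */
        if (gap_cycles > ple_gap)
                return 0;
        /* Spinning beyond ple_window triggers a PAUSE-loop exit. */
        return loop_cycles > ple_window;
}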
97
98 struct vmcs {
99         u32 revision_id;
100         u32 abort;
101         char data[0];
102 };
103
104 struct shared_msr_entry {
105         unsigned index;
106         u64 data;
107         u64 mask;
108 };
109
110 struct vcpu_vmx {
111         struct kvm_vcpu       vcpu;
112         struct list_head      local_vcpus_link;
113         unsigned long         host_rsp;
114         int                   launched;
115         u8                    fail;
116         u32                   idt_vectoring_info;
117         struct shared_msr_entry *guest_msrs;
118         int                   nmsrs;
119         int                   save_nmsrs;
120 #ifdef CONFIG_X86_64
121         u64                   msr_host_kernel_gs_base;
122         u64                   msr_guest_kernel_gs_base;
123 #endif
124         struct vmcs          *vmcs;
125         struct {
126                 int           loaded;
127                 u16           fs_sel, gs_sel, ldt_sel;
128                 int           gs_ldt_reload_needed;
129                 int           fs_reload_needed;
130         } host_state;
131         struct {
132                 int vm86_active;
133                 u8 save_iopl;
134                 struct kvm_save_segment {
135                         u16 selector;
136                         unsigned long base;
137                         u32 limit;
138                         u32 ar;
139                 } tr, es, ds, fs, gs;
140                 struct {
141                         bool pending;
142                         u8 vector;
143                         unsigned rip;
144                 } irq;
145         } rmode;
146         int vpid;
147         bool emulation_required;
148
149         /* Support for vnmi-less CPUs */
150         int soft_vnmi_blocked;
151         ktime_t entry_time;
152         s64 vnmi_blocked_time;
153         u32 exit_reason;
154
155         bool rdtscp_enabled;
156 };
157
158 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
159 {
160         return container_of(vcpu, struct vcpu_vmx, vcpu);
161 }
162
163 static int init_rmode(struct kvm *kvm);
164 static u64 construct_eptp(unsigned long root_hpa);
165
166 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
167 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
168 static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
169
170 static unsigned long *vmx_io_bitmap_a;
171 static unsigned long *vmx_io_bitmap_b;
172 static unsigned long *vmx_msr_bitmap_legacy;
173 static unsigned long *vmx_msr_bitmap_longmode;
174
175 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
176 static DEFINE_SPINLOCK(vmx_vpid_lock);
177
178 static struct vmcs_config {
179         int size;
180         int order;
181         u32 revision_id;
182         u32 pin_based_exec_ctrl;
183         u32 cpu_based_exec_ctrl;
184         u32 cpu_based_2nd_exec_ctrl;
185         u32 vmexit_ctrl;
186         u32 vmentry_ctrl;
187 } vmcs_config;
188
189 static struct vmx_capability {
190         u32 ept;
191         u32 vpid;
192 } vmx_capability;
193
194 #define VMX_SEGMENT_FIELD(seg)                                  \
195         [VCPU_SREG_##seg] = {                                   \
196                 .selector = GUEST_##seg##_SELECTOR,             \
197                 .base = GUEST_##seg##_BASE,                     \
198                 .limit = GUEST_##seg##_LIMIT,                   \
199                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
200         }
201
202 static struct kvm_vmx_segment_field {
203         unsigned selector;
204         unsigned base;
205         unsigned limit;
206         unsigned ar_bytes;
207 } kvm_vmx_segment_fields[] = {
208         VMX_SEGMENT_FIELD(CS),
209         VMX_SEGMENT_FIELD(DS),
210         VMX_SEGMENT_FIELD(ES),
211         VMX_SEGMENT_FIELD(FS),
212         VMX_SEGMENT_FIELD(GS),
213         VMX_SEGMENT_FIELD(SS),
214         VMX_SEGMENT_FIELD(TR),
215         VMX_SEGMENT_FIELD(LDTR),
216 };
217
218 static u64 host_efer;
219
220 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
221
222 /*
223  * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
224  * away by decrementing the array size.
225  */
226 static const u32 vmx_msr_index[] = {
227 #ifdef CONFIG_X86_64
228         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
229 #endif
230         MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR,
231 };
232 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
233
234 static inline int is_page_fault(u32 intr_info)
235 {
236         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
237                              INTR_INFO_VALID_MASK)) ==
238                 (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
239 }
240
241 static inline int is_no_device(u32 intr_info)
242 {
243         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
244                              INTR_INFO_VALID_MASK)) ==
245                 (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
246 }
247
248 static inline int is_invalid_opcode(u32 intr_info)
249 {
250         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
251                              INTR_INFO_VALID_MASK)) ==
252                 (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
253 }
254
255 static inline int is_external_interrupt(u32 intr_info)
256 {
257         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
258                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
259 }
260
261 static inline int is_machine_check(u32 intr_info)
262 {
263         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
264                              INTR_INFO_VALID_MASK)) ==
265                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
266 }
267
268 static inline int cpu_has_vmx_msr_bitmap(void)
269 {
270         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
271 }
272
273 static inline int cpu_has_vmx_tpr_shadow(void)
274 {
275         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
276 }
277
278 static inline int vm_need_tpr_shadow(struct kvm *kvm)
279 {
280         return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
281 }
282
283 static inline int cpu_has_secondary_exec_ctrls(void)
284 {
285         return vmcs_config.cpu_based_exec_ctrl &
286                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
287 }
288
289 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
290 {
291         return vmcs_config.cpu_based_2nd_exec_ctrl &
292                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
293 }
294
295 static inline bool cpu_has_vmx_flexpriority(void)
296 {
297         return cpu_has_vmx_tpr_shadow() &&
298                 cpu_has_vmx_virtualize_apic_accesses();
299 }
300
301 static inline bool cpu_has_vmx_ept_execute_only(void)
302 {
303         return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT);
304 }
305
306 static inline bool cpu_has_vmx_eptp_uncacheable(void)
307 {
308         return !!(vmx_capability.ept & VMX_EPTP_UC_BIT);
309 }
310
311 static inline bool cpu_has_vmx_eptp_writeback(void)
312 {
313         return !!(vmx_capability.ept & VMX_EPTP_WB_BIT);
314 }
315
316 static inline bool cpu_has_vmx_ept_2m_page(void)
317 {
318         return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
319 }
320
321 static inline int cpu_has_vmx_invept_individual_addr(void)
322 {
323         return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
324 }
325
326 static inline int cpu_has_vmx_invept_context(void)
327 {
328         return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT);
329 }
330
331 static inline int cpu_has_vmx_invept_global(void)
332 {
333         return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT);
334 }
335
336 static inline int cpu_has_vmx_ept(void)
337 {
338         return vmcs_config.cpu_based_2nd_exec_ctrl &
339                 SECONDARY_EXEC_ENABLE_EPT;
340 }
341
342 static inline int cpu_has_vmx_unrestricted_guest(void)
343 {
344         return vmcs_config.cpu_based_2nd_exec_ctrl &
345                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
346 }
347
348 static inline int cpu_has_vmx_ple(void)
349 {
350         return vmcs_config.cpu_based_2nd_exec_ctrl &
351                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
352 }
353
354 static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
355 {
356         return flexpriority_enabled &&
357                 (cpu_has_vmx_virtualize_apic_accesses()) &&
358                 (irqchip_in_kernel(kvm));
359 }
360
361 static inline int cpu_has_vmx_vpid(void)
362 {
363         return vmcs_config.cpu_based_2nd_exec_ctrl &
364                 SECONDARY_EXEC_ENABLE_VPID;
365 }
366
367 static inline int cpu_has_vmx_rdtscp(void)
368 {
369         return vmcs_config.cpu_based_2nd_exec_ctrl &
370                 SECONDARY_EXEC_RDTSCP;
371 }
372
373 static inline int cpu_has_virtual_nmis(void)
374 {
375         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
376 }
377
378 static inline bool report_flexpriority(void)
379 {
380         return flexpriority_enabled;
381 }
382
383 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
384 {
385         int i;
386
387         for (i = 0; i < vmx->nmsrs; ++i)
388                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
389                         return i;
390         return -1;
391 }
392
393 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
394 {
395         struct {
396                 u64 vpid : 16;
397                 u64 rsvd : 48;
398                 u64 gva;
399         } operand = { vpid, 0, gva };
400
401         asm volatile (__ex(ASM_VMX_INVVPID)
402                       /* CF==1 or ZF==1 --> rc = -1 */
403                       "; ja 1f ; ud2 ; 1:"
404                       : : "a"(&operand), "c"(ext) : "cc", "memory");
405 }
406
407 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
408 {
409         struct {
410                 u64 eptp, gpa;
411         } operand = {eptp, gpa};
412
413         asm volatile (__ex(ASM_VMX_INVEPT)
414                         /* CF==1 or ZF==1 --> rc = -1 */
415                         "; ja 1f ; ud2 ; 1:\n"
416                         : : "a" (&operand), "c" (ext) : "cc", "memory");
417 }
418
419 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
420 {
421         int i;
422
423         i = __find_msr_index(vmx, msr);
424         if (i >= 0)
425                 return &vmx->guest_msrs[i];
426         return NULL;
427 }
428
429 static void vmcs_clear(struct vmcs *vmcs)
430 {
431         u64 phys_addr = __pa(vmcs);
432         u8 error;
433
434         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
435                       : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
436                       : "cc", "memory");
437         if (error)
438                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
439                        vmcs, phys_addr);
440 }
441
442 static void __vcpu_clear(void *arg)
443 {
444         struct vcpu_vmx *vmx = arg;
445         int cpu = raw_smp_processor_id();
446
447         if (vmx->vcpu.cpu == cpu)
448                 vmcs_clear(vmx->vmcs);
449         if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
450                 per_cpu(current_vmcs, cpu) = NULL;
451         rdtscll(vmx->vcpu.arch.host_tsc);
452         list_del(&vmx->local_vcpus_link);
453         vmx->vcpu.cpu = -1;
454         vmx->launched = 0;
455 }
456
457 static void vcpu_clear(struct vcpu_vmx *vmx)
458 {
459         if (vmx->vcpu.cpu == -1)
460                 return;
461         smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
462 }
463
464 static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
465 {
466         if (vmx->vpid == 0)
467                 return;
468
469         __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
470 }
471
472 static inline void ept_sync_global(void)
473 {
474         if (cpu_has_vmx_invept_global())
475                 __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
476 }
477
478 static inline void ept_sync_context(u64 eptp)
479 {
480         if (enable_ept) {
481                 if (cpu_has_vmx_invept_context())
482                         __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
483                 else
484                         ept_sync_global();
485         }
486 }
487
488 static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
489 {
490         if (enable_ept) {
491                 if (cpu_has_vmx_invept_individual_addr())
492                         __invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
493                                         eptp, gpa);
494                 else
495                         ept_sync_context(eptp);
496         }
497 }
498
499 static unsigned long vmcs_readl(unsigned long field)
500 {
501         unsigned long value;
502
503         asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
504                       : "=a"(value) : "d"(field) : "cc");
505         return value;
506 }
507
508 static u16 vmcs_read16(unsigned long field)
509 {
510         return vmcs_readl(field);
511 }
512
513 static u32 vmcs_read32(unsigned long field)
514 {
515         return vmcs_readl(field);
516 }
517
518 static u64 vmcs_read64(unsigned long field)
519 {
520 #ifdef CONFIG_X86_64
521         return vmcs_readl(field);
522 #else
523         return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
524 #endif
525 }
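/*
 * Editorial sketch, not part of the original file: on 32-bit hosts a
 * 64-bit VMCS field is split across two encodings, with the high half
 * at field + 1 (the *_HIGH encoding, e.g. TSC_OFFSET_HIGH).  An
 * open-coded equivalent of the !CONFIG_X86_64 path above would be:
 */
static inline u64 vmcs_read64_split_example(unsigned long field)
{
        u64 lo = vmcs_readl(field);             /* bits 31:0  */
        u64 hi = vmcs_readl(field + 1);         /* bits 63:32 */

        return lo | (hi << 32);
}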
526
527 static noinline void vmwrite_error(unsigned long field, unsigned long value)
528 {
529         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
530                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
531         dump_stack();
532 }
533
534 static void vmcs_writel(unsigned long field, unsigned long value)
535 {
536         u8 error;
537
538         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
539                        : "=q"(error) : "a"(value), "d"(field) : "cc");
540         if (unlikely(error))
541                 vmwrite_error(field, value);
542 }
543
544 static void vmcs_write16(unsigned long field, u16 value)
545 {
546         vmcs_writel(field, value);
547 }
548
549 static void vmcs_write32(unsigned long field, u32 value)
550 {
551         vmcs_writel(field, value);
552 }
553
554 static void vmcs_write64(unsigned long field, u64 value)
555 {
556         vmcs_writel(field, value);
557 #ifndef CONFIG_X86_64
558         asm volatile ("");
559         vmcs_writel(field+1, value >> 32);
560 #endif
561 }
562
563 static void vmcs_clear_bits(unsigned long field, u32 mask)
564 {
565         vmcs_writel(field, vmcs_readl(field) & ~mask);
566 }
567
568 static void vmcs_set_bits(unsigned long field, u32 mask)
569 {
570         vmcs_writel(field, vmcs_readl(field) | mask);
571 }
572
573 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
574 {
575         u32 eb;
576
577         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
578         if (!vcpu->fpu_active)
579                 eb |= 1u << NM_VECTOR;
580         /*
581          * Unconditionally intercept #DB so we can maintain dr6 without
582          * reading it every exit.
583          */
584         eb |= 1u << DB_VECTOR;
585         if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
586                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
587                         eb |= 1u << BP_VECTOR;
588         }
589         if (to_vmx(vcpu)->rmode.vm86_active)
590                 eb = ~0;
591         if (enable_ept)
592                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
593         vmcs_write32(EXCEPTION_BITMAP, eb);
594 }
595
596 static void reload_tss(void)
597 {
598         /*
599          * VT restores TR but not its size.  Useless.
600          */
601         struct descriptor_table gdt;
602         struct desc_struct *descs;
603
604         kvm_get_gdt(&gdt);
605         descs = (void *)gdt.base;
606         descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
607         load_TR_desc();
608 }
609
610 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
611 {
612         u64 guest_efer;
613         u64 ignore_bits;
614
615         guest_efer = vmx->vcpu.arch.shadow_efer;
616
617         /*
618          * NX is emulated; LMA and LME are handled by hardware; SCE is
619          * meaningless outside long mode
620          */
621         ignore_bits = EFER_NX | EFER_SCE;
622 #ifdef CONFIG_X86_64
623         ignore_bits |= EFER_LMA | EFER_LME;
624         /* SCE is meaningful only in long mode on Intel */
625         if (guest_efer & EFER_LMA)
626                 ignore_bits &= ~(u64)EFER_SCE;
627 #endif
628         guest_efer &= ~ignore_bits;
629         guest_efer |= host_efer & ignore_bits;
630         vmx->guest_msrs[efer_offset].data = guest_efer;
631         vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
632         return true;
633 }
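/*
 * Editorial example, not part of the original file: update_transition_efer()
 * keeps the guest's EFER bits except those in ignore_bits, which are taken
 * from the host.  With hypothetical values for a legacy-mode guest:
 *
 *      host_efer   = EFER_SCE | EFER_LME | EFER_LMA | EFER_NX
 *      shadow_efer = EFER_SCE
 *      ignore_bits = EFER_NX | EFER_SCE | EFER_LMA | EFER_LME
 *
 *      guest_efer  = (shadow_efer & ~ignore_bits)   // 0
 *                  | (host_efer   &  ignore_bits);  // host's NX/SCE/LMA/LME
 *
 * so the guest runs with the host's settings for the ignored bits, and the
 * mask (~ignore_bits) tells the shared-MSR machinery which bits it actually
 * has to switch on entry/exit.
 */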
634
635 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
636 {
637         struct vcpu_vmx *vmx = to_vmx(vcpu);
638         int i;
639
640         if (vmx->host_state.loaded)
641                 return;
642
643         vmx->host_state.loaded = 1;
644         /*
645          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
646          * allow segment selectors with cpl > 0 or ti == 1.
647          */
648         vmx->host_state.ldt_sel = kvm_read_ldt();
649         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
650         vmx->host_state.fs_sel = kvm_read_fs();
651         if (!(vmx->host_state.fs_sel & 7)) {
652                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
653                 vmx->host_state.fs_reload_needed = 0;
654         } else {
655                 vmcs_write16(HOST_FS_SELECTOR, 0);
656                 vmx->host_state.fs_reload_needed = 1;
657         }
658         vmx->host_state.gs_sel = kvm_read_gs();
659         if (!(vmx->host_state.gs_sel & 7))
660                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
661         else {
662                 vmcs_write16(HOST_GS_SELECTOR, 0);
663                 vmx->host_state.gs_ldt_reload_needed = 1;
664         }
665
666 #ifdef CONFIG_X86_64
667         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
668         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
669 #else
670         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
671         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
672 #endif
673
674 #ifdef CONFIG_X86_64
675         if (is_long_mode(&vmx->vcpu)) {
676                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
677                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
678         }
679 #endif
680         for (i = 0; i < vmx->save_nmsrs; ++i)
681                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
682                                    vmx->guest_msrs[i].data,
683                                    vmx->guest_msrs[i].mask);
684 }
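/*
 * Editorial sketch, not part of the original file: the "& 7" tests in
 * vmx_save_host_state() look at the selector's low three bits, which hold
 * the RPL (bits 1:0) and the TI flag (bit 2).  Host-state selectors loaded
 * from the VMCS must have RPL == 0 and TI == 0 (SDM 22.2.3), so anything
 * with those bits set is written as 0 and reloaded by hand on exit:
 */
static inline int host_selector_usable_example(u16 sel)
{
        return (sel & 7) == 0;          /* RPL == 0 && TI == 0 */
}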
685
686 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
687 {
688         unsigned long flags;
689
690         if (!vmx->host_state.loaded)
691                 return;
692
693         ++vmx->vcpu.stat.host_state_reload;
694         vmx->host_state.loaded = 0;
695         if (vmx->host_state.fs_reload_needed)
696                 kvm_load_fs(vmx->host_state.fs_sel);
697         if (vmx->host_state.gs_ldt_reload_needed) {
698                 kvm_load_ldt(vmx->host_state.ldt_sel);
699                 /*
700                  * If we have to reload gs, we must take care to
701                  * preserve our gs base.
702                  */
703                 local_irq_save(flags);
704                 kvm_load_gs(vmx->host_state.gs_sel);
705 #ifdef CONFIG_X86_64
706                 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
707 #endif
708                 local_irq_restore(flags);
709         }
710         reload_tss();
711 #ifdef CONFIG_X86_64
712         if (is_long_mode(&vmx->vcpu)) {
713                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
714                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
715         }
716 #endif
717 }
718
719 static void vmx_load_host_state(struct vcpu_vmx *vmx)
720 {
721         preempt_disable();
722         __vmx_load_host_state(vmx);
723         preempt_enable();
724 }
725
726 /*
727  * Switches to the specified vcpu, until a matching vcpu_put(); assumes the
728  * vcpu mutex is already taken.
729  */
730 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
731 {
732         struct vcpu_vmx *vmx = to_vmx(vcpu);
733         u64 phys_addr = __pa(vmx->vmcs);
734         u64 tsc_this, delta, new_offset;
735
736         if (vcpu->cpu != cpu) {
737                 vcpu_clear(vmx);
738                 kvm_migrate_timers(vcpu);
739                 set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
740                 local_irq_disable();
741                 list_add(&vmx->local_vcpus_link,
742                          &per_cpu(vcpus_on_cpu, cpu));
743                 local_irq_enable();
744         }
745
746         if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
747                 u8 error;
748
749                 per_cpu(current_vmcs, cpu) = vmx->vmcs;
750                 asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
751                               : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
752                               : "cc");
753                 if (error)
754                         printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
755                                vmx->vmcs, phys_addr);
756         }
757
758         if (vcpu->cpu != cpu) {
759                 struct descriptor_table dt;
760                 unsigned long sysenter_esp;
761
762                 vcpu->cpu = cpu;
763                 /*
764                  * Linux uses per-cpu TSS and GDT, so set these when switching
765                  * processors.
766                  */
767                 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
768                 kvm_get_gdt(&dt);
769                 vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
770
771                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
772                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
773
774                 /*
774                  * Make sure the time stamp counter is monotonic.
776                  */
777                 rdtscll(tsc_this);
778                 if (tsc_this < vcpu->arch.host_tsc) {
779                         delta = vcpu->arch.host_tsc - tsc_this;
780                         new_offset = vmcs_read64(TSC_OFFSET) + delta;
781                         vmcs_write64(TSC_OFFSET, new_offset);
782                 }
783         }
784 }
785
786 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
787 {
788         __vmx_load_host_state(to_vmx(vcpu));
789 }
790
791 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
792 {
793         if (vcpu->fpu_active)
794                 return;
795         vcpu->fpu_active = 1;
796         vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
797         if (vcpu->arch.cr0 & X86_CR0_TS)
798                 vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
799         update_exception_bitmap(vcpu);
800 }
801
802 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
803 {
804         if (!vcpu->fpu_active)
805                 return;
806         vcpu->fpu_active = 0;
807         vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
808         update_exception_bitmap(vcpu);
809 }
810
811 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
812 {
813         unsigned long rflags;
814
815         rflags = vmcs_readl(GUEST_RFLAGS);
816         if (to_vmx(vcpu)->rmode.vm86_active)
817                 rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
818         return rflags;
819 }
820
821 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
822 {
823         if (to_vmx(vcpu)->rmode.vm86_active)
824                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
825         vmcs_writel(GUEST_RFLAGS, rflags);
826 }
827
828 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
829 {
830         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
831         int ret = 0;
832
833         if (interruptibility & GUEST_INTR_STATE_STI)
834                 ret |= X86_SHADOW_INT_STI;
835         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
836                 ret |= X86_SHADOW_INT_MOV_SS;
837
838         return ret & mask;
839 }
840
841 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
842 {
843         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
844         u32 interruptibility = interruptibility_old;
845
846         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
847
848         if (mask & X86_SHADOW_INT_MOV_SS)
849                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
850         if (mask & X86_SHADOW_INT_STI)
851                 interruptibility |= GUEST_INTR_STATE_STI;
852
853         if ((interruptibility != interruptibility_old))
854                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
855 }
856
857 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
858 {
859         unsigned long rip;
860
861         rip = kvm_rip_read(vcpu);
862         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
863         kvm_rip_write(vcpu, rip);
864
865         /* skipping an emulated instruction also counts */
866         vmx_set_interrupt_shadow(vcpu, 0);
867 }
868
869 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
870                                 bool has_error_code, u32 error_code)
871 {
872         struct vcpu_vmx *vmx = to_vmx(vcpu);
873         u32 intr_info = nr | INTR_INFO_VALID_MASK;
874
875         if (has_error_code) {
876                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
877                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
878         }
879
880         if (vmx->rmode.vm86_active) {
881                 vmx->rmode.irq.pending = true;
882                 vmx->rmode.irq.vector = nr;
883                 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
884                 if (kvm_exception_is_soft(nr))
885                         vmx->rmode.irq.rip +=
886                                 vmx->vcpu.arch.event_exit_inst_len;
887                 intr_info |= INTR_TYPE_SOFT_INTR;
888                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
889                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
890                 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
891                 return;
892         }
893
894         if (kvm_exception_is_soft(nr)) {
895                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
896                              vmx->vcpu.arch.event_exit_inst_len);
897                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
898         } else
899                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
900
901         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
902 }
903
904 static bool vmx_rdtscp_supported(void)
905 {
906         return cpu_has_vmx_rdtscp();
907 }
908
909 /*
910  * Swap MSR entry in host/guest MSR entry array.
911  */
912 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
913 {
914         struct shared_msr_entry tmp;
915
916         tmp = vmx->guest_msrs[to];
917         vmx->guest_msrs[to] = vmx->guest_msrs[from];
918         vmx->guest_msrs[from] = tmp;
919 }
920
921 /*
922  * Set up the vmcs to automatically save and restore system
923  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
924  * mode, as fiddling with msrs is very expensive.
925  */
926 static void setup_msrs(struct vcpu_vmx *vmx)
927 {
928         int save_nmsrs, index;
929         unsigned long *msr_bitmap;
930
931         vmx_load_host_state(vmx);
932         save_nmsrs = 0;
933 #ifdef CONFIG_X86_64
934         if (is_long_mode(&vmx->vcpu)) {
935                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
936                 if (index >= 0)
937                         move_msr_up(vmx, index, save_nmsrs++);
938                 index = __find_msr_index(vmx, MSR_LSTAR);
939                 if (index >= 0)
940                         move_msr_up(vmx, index, save_nmsrs++);
941                 index = __find_msr_index(vmx, MSR_CSTAR);
942                 if (index >= 0)
943                         move_msr_up(vmx, index, save_nmsrs++);
944                 index = __find_msr_index(vmx, MSR_TSC_AUX);
945                 if (index >= 0 && vmx->rdtscp_enabled)
946                         move_msr_up(vmx, index, save_nmsrs++);
947                 /*
948                  * MSR_K6_STAR is only needed on long mode guests, and only
949                  * if efer.sce is enabled.
950                  */
951                 index = __find_msr_index(vmx, MSR_K6_STAR);
952                 if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
953                         move_msr_up(vmx, index, save_nmsrs++);
954         }
955 #endif
956         index = __find_msr_index(vmx, MSR_EFER);
957         if (index >= 0 && update_transition_efer(vmx, index))
958                 move_msr_up(vmx, index, save_nmsrs++);
959
960         vmx->save_nmsrs = save_nmsrs;
961
962         if (cpu_has_vmx_msr_bitmap()) {
963                 if (is_long_mode(&vmx->vcpu))
964                         msr_bitmap = vmx_msr_bitmap_longmode;
965                 else
966                         msr_bitmap = vmx_msr_bitmap_legacy;
967
968                 vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
969         }
970 }
971
972 /*
973  * reads and returns guest's timestamp counter "register"
974  * guest_tsc = host_tsc + tsc_offset    -- 21.3
975  */
976 static u64 guest_read_tsc(void)
977 {
978         u64 host_tsc, tsc_offset;
979
980         rdtscll(host_tsc);
981         tsc_offset = vmcs_read64(TSC_OFFSET);
982         return host_tsc + tsc_offset;
983 }
984
985 /*
986  * writes 'guest_tsc' into guest's timestamp counter "register"
987  * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
988  */
989 static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
990 {
991         vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
992 }
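/*
 * Editorial example, not part of the original file: the two helpers above
 * are inverses.  With hypothetical values host_tsc = 1000000 and
 * TSC_OFFSET = -400000, guest_read_tsc() returns 600000; conversely
 * guest_write_tsc(600000, 1000000) stores 600000 - 1000000 = -400000
 * back into TSC_OFFSET, preserving the guest's view of the counter.
 */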
993
994 /*
995  * Reads an msr value (of 'msr_index') into 'pdata'.
996  * Returns 0 on success, non-0 otherwise.
997  * Assumes vcpu_load() was already called.
998  */
999 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1000 {
1001         u64 data;
1002         struct shared_msr_entry *msr;
1003
1004         if (!pdata) {
1005                 printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
1006                 return -EINVAL;
1007         }
1008
1009         switch (msr_index) {
1010 #ifdef CONFIG_X86_64
1011         case MSR_FS_BASE:
1012                 data = vmcs_readl(GUEST_FS_BASE);
1013                 break;
1014         case MSR_GS_BASE:
1015                 data = vmcs_readl(GUEST_GS_BASE);
1016                 break;
1017         case MSR_KERNEL_GS_BASE:
1018                 vmx_load_host_state(to_vmx(vcpu));
1019                 data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
1020                 break;
1021 #endif
1022         case MSR_EFER:
1023                 return kvm_get_msr_common(vcpu, msr_index, pdata);
1024         case MSR_IA32_TSC:
1025                 data = guest_read_tsc();
1026                 break;
1027         case MSR_IA32_SYSENTER_CS:
1028                 data = vmcs_read32(GUEST_SYSENTER_CS);
1029                 break;
1030         case MSR_IA32_SYSENTER_EIP:
1031                 data = vmcs_readl(GUEST_SYSENTER_EIP);
1032                 break;
1033         case MSR_IA32_SYSENTER_ESP:
1034                 data = vmcs_readl(GUEST_SYSENTER_ESP);
1035                 break;
1036         case MSR_TSC_AUX:
1037                 if (!to_vmx(vcpu)->rdtscp_enabled)
1038                         return 1;
1039                 /* Otherwise falls through */
1040         default:
1041                 vmx_load_host_state(to_vmx(vcpu));
1042                 msr = find_msr_entry(to_vmx(vcpu), msr_index);
1043                 if (msr) {
1044                         vmx_load_host_state(to_vmx(vcpu));
1045                         data = msr->data;
1046                         break;
1047                 }
1048                 return kvm_get_msr_common(vcpu, msr_index, pdata);
1049         }
1050
1051         *pdata = data;
1052         return 0;
1053 }
1054
1055 /*
1056  * Writes msr value into the appropriate "register".
1057  * Returns 0 on success, non-0 otherwise.
1058  * Assumes vcpu_load() was already called.
1059  */
1060 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1061 {
1062         struct vcpu_vmx *vmx = to_vmx(vcpu);
1063         struct shared_msr_entry *msr;
1064         u64 host_tsc;
1065         int ret = 0;
1066
1067         switch (msr_index) {
1068         case MSR_EFER:
1069                 vmx_load_host_state(vmx);
1070                 ret = kvm_set_msr_common(vcpu, msr_index, data);
1071                 break;
1072 #ifdef CONFIG_X86_64
1073         case MSR_FS_BASE:
1074                 vmcs_writel(GUEST_FS_BASE, data);
1075                 break;
1076         case MSR_GS_BASE:
1077                 vmcs_writel(GUEST_GS_BASE, data);
1078                 break;
1079         case MSR_KERNEL_GS_BASE:
1080                 vmx_load_host_state(vmx);
1081                 vmx->msr_guest_kernel_gs_base = data;
1082                 break;
1083 #endif
1084         case MSR_IA32_SYSENTER_CS:
1085                 vmcs_write32(GUEST_SYSENTER_CS, data);
1086                 break;
1087         case MSR_IA32_SYSENTER_EIP:
1088                 vmcs_writel(GUEST_SYSENTER_EIP, data);
1089                 break;
1090         case MSR_IA32_SYSENTER_ESP:
1091                 vmcs_writel(GUEST_SYSENTER_ESP, data);
1092                 break;
1093         case MSR_IA32_TSC:
1094                 rdtscll(host_tsc);
1095                 guest_write_tsc(data, host_tsc);
1096                 break;
1097         case MSR_IA32_CR_PAT:
1098                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
1099                         vmcs_write64(GUEST_IA32_PAT, data);
1100                         vcpu->arch.pat = data;
1101                         break;
1102                 }
1103                 ret = kvm_set_msr_common(vcpu, msr_index, data);
1104                 break;
1105         case MSR_TSC_AUX:
1106                 if (!vmx->rdtscp_enabled)
1107                         return 1;
1108                 /* Check reserved bits; the upper 32 bits must be zero */
1109                 if ((data >> 32) != 0)
1110                         return 1;
1111                 /* Otherwise falls through */
1112         default:
1113                 msr = find_msr_entry(vmx, msr_index);
1114                 if (msr) {
1115                         vmx_load_host_state(vmx);
1116                         msr->data = data;
1117                         break;
1118                 }
1119                 ret = kvm_set_msr_common(vcpu, msr_index, data);
1120         }
1121
1122         return ret;
1123 }
1124
1125 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1126 {
1127         __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
1128         switch (reg) {
1129         case VCPU_REGS_RSP:
1130                 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
1131                 break;
1132         case VCPU_REGS_RIP:
1133                 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
1134                 break;
1135         case VCPU_EXREG_PDPTR:
1136                 if (enable_ept)
1137                         ept_save_pdptrs(vcpu);
1138                 break;
1139         default:
1140                 break;
1141         }
1142 }
1143
1144 static void set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
1145 {
1146         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1147                 vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
1148         else
1149                 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
1150
1151         update_exception_bitmap(vcpu);
1152 }
1153
1154 static __init int cpu_has_kvm_support(void)
1155 {
1156         return cpu_has_vmx();
1157 }
1158
1159 static __init int vmx_disabled_by_bios(void)
1160 {
1161         u64 msr;
1162
1163         rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
1164         return (msr & (FEATURE_CONTROL_LOCKED |
1165                        FEATURE_CONTROL_VMXON_ENABLED))
1166             == FEATURE_CONTROL_LOCKED;
1167         /* locked but not enabled */
1168 }
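/*
 * Editorial note, not part of the original file: IA32_FEATURE_CONTROL is
 * write-once until reset.  vmx_disabled_by_bios() reports VMX as disabled
 * only when the MSR is locked with the VMXON-enable bit clear; if it is
 * still unlocked, hardware_enable() below sets and locks it itself.
 */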
1169
1170 static int hardware_enable(void *garbage)
1171 {
1172         int cpu = raw_smp_processor_id();
1173         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
1174         u64 old;
1175
1176         if (read_cr4() & X86_CR4_VMXE)
1177                 return -EBUSY;
1178
1179         INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
1180         rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
1181         if ((old & (FEATURE_CONTROL_LOCKED |
1182                     FEATURE_CONTROL_VMXON_ENABLED))
1183             != (FEATURE_CONTROL_LOCKED |
1184                 FEATURE_CONTROL_VMXON_ENABLED))
1185                 /* enable and lock */
1186                 wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
1187                        FEATURE_CONTROL_LOCKED |
1188                        FEATURE_CONTROL_VMXON_ENABLED);
1189         write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
1190         asm volatile (ASM_VMX_VMXON_RAX
1191                       : : "a"(&phys_addr), "m"(phys_addr)
1192                       : "memory", "cc");
1193
1194         ept_sync_global();
1195
1196         return 0;
1197 }
1198
1199 static void vmclear_local_vcpus(void)
1200 {
1201         int cpu = raw_smp_processor_id();
1202         struct vcpu_vmx *vmx, *n;
1203
1204         list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
1205                                  local_vcpus_link)
1206                 __vcpu_clear(vmx);
1207 }
1208
1209
1210 /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
1211  * tricks.
1212  */
1213 static void kvm_cpu_vmxoff(void)
1214 {
1215         asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
1216         write_cr4(read_cr4() & ~X86_CR4_VMXE);
1217 }
1218
1219 static void hardware_disable(void *garbage)
1220 {
1221         vmclear_local_vcpus();
1222         kvm_cpu_vmxoff();
1223 }
1224
1225 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
1226                                       u32 msr, u32 *result)
1227 {
1228         u32 vmx_msr_low, vmx_msr_high;
1229         u32 ctl = ctl_min | ctl_opt;
1230
1231         rdmsr(msr, vmx_msr_low, vmx_msr_high);
1232
1233         ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
1234         ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
1235
1236         /* Ensure minimum (required) set of control bits are supported. */
1237         if (ctl_min & ~ctl)
1238                 return -EIO;
1239
1240         *result = ctl;
1241         return 0;
1242 }
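/*
 * Editorial example, not part of the original file: for each VMX control
 * MSR the high word gives the allowed-1 settings (a clear bit can never be
 * set) and the low word the allowed-0 settings (a set bit can never be
 * cleared).  With hypothetical values
 *
 *      ctl_min      = 0x006    bits the caller requires
 *      ctl_opt      = 0x100    bits the caller merely wants
 *      vmx_msr_high = 0xfef6   bit 8 not supported by this CPU
 *      vmx_msr_low  = 0x016    bit 4 forced on by this CPU
 *
 * adjust_vmx_controls() computes ((0x106 & 0xfef6) | 0x016) = 0x016: the
 * optional bit 8 is dropped, the mandatory bit 4 is added, and since every
 * bit of ctl_min survives, the call succeeds.
 */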
1243
1244 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
1245 {
1246         u32 vmx_msr_low, vmx_msr_high;
1247         u32 min, opt, min2, opt2;
1248         u32 _pin_based_exec_control = 0;
1249         u32 _cpu_based_exec_control = 0;
1250         u32 _cpu_based_2nd_exec_control = 0;
1251         u32 _vmexit_control = 0;
1252         u32 _vmentry_control = 0;
1253
1254         min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
1255         opt = PIN_BASED_VIRTUAL_NMIS;
1256         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
1257                                 &_pin_based_exec_control) < 0)
1258                 return -EIO;
1259
1260         min = CPU_BASED_HLT_EXITING |
1261 #ifdef CONFIG_X86_64
1262               CPU_BASED_CR8_LOAD_EXITING |
1263               CPU_BASED_CR8_STORE_EXITING |
1264 #endif
1265               CPU_BASED_CR3_LOAD_EXITING |
1266               CPU_BASED_CR3_STORE_EXITING |
1267               CPU_BASED_USE_IO_BITMAPS |
1268               CPU_BASED_MOV_DR_EXITING |
1269               CPU_BASED_USE_TSC_OFFSETING |
1270               CPU_BASED_MWAIT_EXITING |
1271               CPU_BASED_MONITOR_EXITING |
1272               CPU_BASED_INVLPG_EXITING;
1273         opt = CPU_BASED_TPR_SHADOW |
1274               CPU_BASED_USE_MSR_BITMAPS |
1275               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1276         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
1277                                 &_cpu_based_exec_control) < 0)
1278                 return -EIO;
1279 #ifdef CONFIG_X86_64
1280         if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
1281                 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
1282                                            ~CPU_BASED_CR8_STORE_EXITING;
1283 #endif
1284         if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
1285                 min2 = 0;
1286                 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
1287                         SECONDARY_EXEC_WBINVD_EXITING |
1288                         SECONDARY_EXEC_ENABLE_VPID |
1289                         SECONDARY_EXEC_ENABLE_EPT |
1290                         SECONDARY_EXEC_UNRESTRICTED_GUEST |
1291                         SECONDARY_EXEC_PAUSE_LOOP_EXITING |
1292                         SECONDARY_EXEC_RDTSCP;
1293                 if (adjust_vmx_controls(min2, opt2,
1294                                         MSR_IA32_VMX_PROCBASED_CTLS2,
1295                                         &_cpu_based_2nd_exec_control) < 0)
1296                         return -EIO;
1297         }
1298 #ifndef CONFIG_X86_64
1299         if (!(_cpu_based_2nd_exec_control &
1300                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
1301                 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
1302 #endif
1303         if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
1304                 /* CR3 accesses and invlpg don't need to cause VM Exits when
1305                    EPT is enabled */
1306                 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
1307                                              CPU_BASED_CR3_STORE_EXITING |
1308                                              CPU_BASED_INVLPG_EXITING);
1309                 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
1310                       vmx_capability.ept, vmx_capability.vpid);
1311         }
1312
1313         min = 0;
1314 #ifdef CONFIG_X86_64
1315         min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
1316 #endif
1317         opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
1318         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
1319                                 &_vmexit_control) < 0)
1320                 return -EIO;
1321
1322         min = 0;
1323         opt = VM_ENTRY_LOAD_IA32_PAT;
1324         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
1325                                 &_vmentry_control) < 0)
1326                 return -EIO;
1327
1328         rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
1329
1330         /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
1331         if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
1332                 return -EIO;
1333
1334 #ifdef CONFIG_X86_64
1335         /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
1336         if (vmx_msr_high & (1u<<16))
1337                 return -EIO;
1338 #endif
1339
1340         /* Require Write-Back (WB) memory type for VMCS accesses. */
1341         if (((vmx_msr_high >> 18) & 15) != 6)
1342                 return -EIO;
1343
1344         vmcs_conf->size = vmx_msr_high & 0x1fff;
1345         vmcs_conf->order = get_order(vmcs_config.size);
1346         vmcs_conf->revision_id = vmx_msr_low;
1347
1348         vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
1349         vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
1350         vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
1351         vmcs_conf->vmexit_ctrl         = _vmexit_control;
1352         vmcs_conf->vmentry_ctrl        = _vmentry_control;
1353
1354         return 0;
1355 }
1356
1357 static struct vmcs *alloc_vmcs_cpu(int cpu)
1358 {
1359         int node = cpu_to_node(cpu);
1360         struct page *pages;
1361         struct vmcs *vmcs;
1362
1363         pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
1364         if (!pages)
1365                 return NULL;
1366         vmcs = page_address(pages);
1367         memset(vmcs, 0, vmcs_config.size);
1368         vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
1369         return vmcs;
1370 }
1371
1372 static struct vmcs *alloc_vmcs(void)
1373 {
1374         return alloc_vmcs_cpu(raw_smp_processor_id());
1375 }
1376
1377 static void free_vmcs(struct vmcs *vmcs)
1378 {
1379         free_pages((unsigned long)vmcs, vmcs_config.order);
1380 }
1381
1382 static void free_kvm_area(void)
1383 {
1384         int cpu;
1385
1386         for_each_possible_cpu(cpu) {
1387                 free_vmcs(per_cpu(vmxarea, cpu));
1388                 per_cpu(vmxarea, cpu) = NULL;
1389         }
1390 }
1391
1392 static __init int alloc_kvm_area(void)
1393 {
1394         int cpu;
1395
1396         for_each_possible_cpu(cpu) {
1397                 struct vmcs *vmcs;
1398
1399                 vmcs = alloc_vmcs_cpu(cpu);
1400                 if (!vmcs) {
1401                         free_kvm_area();
1402                         return -ENOMEM;
1403                 }
1404
1405                 per_cpu(vmxarea, cpu) = vmcs;
1406         }
1407         return 0;
1408 }
1409
1410 static __init int hardware_setup(void)
1411 {
1412         if (setup_vmcs_config(&vmcs_config) < 0)
1413                 return -EIO;
1414
1415         if (boot_cpu_has(X86_FEATURE_NX))
1416                 kvm_enable_efer_bits(EFER_NX);
1417
1418         if (!cpu_has_vmx_vpid())
1419                 enable_vpid = 0;
1420
1421         if (!cpu_has_vmx_ept()) {
1422                 enable_ept = 0;
1423                 enable_unrestricted_guest = 0;
1424         }
1425
1426         if (!cpu_has_vmx_unrestricted_guest())
1427                 enable_unrestricted_guest = 0;
1428
1429         if (!cpu_has_vmx_flexpriority())
1430                 flexpriority_enabled = 0;
1431
1432         if (!cpu_has_vmx_tpr_shadow())
1433                 kvm_x86_ops->update_cr8_intercept = NULL;
1434
1435         if (enable_ept && !cpu_has_vmx_ept_2m_page())
1436                 kvm_disable_largepages();
1437
1438         if (!cpu_has_vmx_ple())
1439                 ple_gap = 0;
1440
1441         return alloc_kvm_area();
1442 }
1443
1444 static __exit void hardware_unsetup(void)
1445 {
1446         free_kvm_area();
1447 }
1448
1449 static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
1450 {
1451         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1452
1453         if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
1454                 vmcs_write16(sf->selector, save->selector);
1455                 vmcs_writel(sf->base, save->base);
1456                 vmcs_write32(sf->limit, save->limit);
1457                 vmcs_write32(sf->ar_bytes, save->ar);
1458         } else {
1459                 u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
1460                         << AR_DPL_SHIFT;
1461                 vmcs_write32(sf->ar_bytes, 0x93 | dpl);
1462         }
1463 }
1464
1465 static void enter_pmode(struct kvm_vcpu *vcpu)
1466 {
1467         unsigned long flags;
1468         struct vcpu_vmx *vmx = to_vmx(vcpu);
1469
1470         vmx->emulation_required = 1;
1471         vmx->rmode.vm86_active = 0;
1472
1473         vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
1474         vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
1475         vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
1476
1477         flags = vmcs_readl(GUEST_RFLAGS);
1478         flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
1479         flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
1480         vmcs_writel(GUEST_RFLAGS, flags);
1481
1482         vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
1483                         (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
1484
1485         update_exception_bitmap(vcpu);
1486
1487         if (emulate_invalid_guest_state)
1488                 return;
1489
1490         fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
1491         fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
1492         fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs);
1493         fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs);
1494
1495         vmcs_write16(GUEST_SS_SELECTOR, 0);
1496         vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
1497
1498         vmcs_write16(GUEST_CS_SELECTOR,
1499                      vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
1500         vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
1501 }
1502
1503 static gva_t rmode_tss_base(struct kvm *kvm)
1504 {
1505         if (!kvm->arch.tss_addr) {
1506                 struct kvm_memslots *slots;
1507                 gfn_t base_gfn;
1508
1509                 slots = rcu_dereference(kvm->memslots);
1510                 base_gfn = slots->memslots[0].base_gfn +
1511                                  slots->memslots[0].npages - 3;
1512                 return base_gfn << PAGE_SHIFT;
1513         }
1514         return kvm->arch.tss_addr;
1515 }
1516
1517 static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
1518 {
1519         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1520
1521         save->selector = vmcs_read16(sf->selector);
1522         save->base = vmcs_readl(sf->base);
1523         save->limit = vmcs_read32(sf->limit);
1524         save->ar = vmcs_read32(sf->ar_bytes);
1525         vmcs_write16(sf->selector, save->base >> 4);
1526         vmcs_write32(sf->base, save->base & 0xfffff);
1527         vmcs_write32(sf->limit, 0xffff);
1528         vmcs_write32(sf->ar_bytes, 0xf3);
1529 }
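/*
 * Editorial note, not part of the original file: in real mode the segment
 * base is simply selector * 16, so fix_rmode_seg() derives a selector the
 * guest could have loaded itself (base >> 4, e.g. base 0xb8000 becomes
 * selector 0xb800) and clips the base to the 20-bit real-mode address
 * space (base & 0xfffff), while 0xf3 marks a present, writable data
 * segment with DPL 3.
 */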
1530
1531 static void enter_rmode(struct kvm_vcpu *vcpu)
1532 {
1533         unsigned long flags;
1534         struct vcpu_vmx *vmx = to_vmx(vcpu);
1535
1536         if (enable_unrestricted_guest)
1537                 return;
1538
1539         vmx->emulation_required = 1;
1540         vmx->rmode.vm86_active = 1;
1541
1542         vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
1543         vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
1544
1545         vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
1546         vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
1547
1548         vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
1549         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1550
1551         flags = vmcs_readl(GUEST_RFLAGS);
1552         vmx->rmode.save_iopl
1553                 = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1554
1555         flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1556
1557         vmcs_writel(GUEST_RFLAGS, flags);
1558         vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
1559         update_exception_bitmap(vcpu);
1560
1561         if (emulate_invalid_guest_state)
1562                 goto continue_rmode;
1563
1564         vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
1565         vmcs_write32(GUEST_SS_LIMIT, 0xffff);
1566         vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
1567
1568         vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
1569         vmcs_write32(GUEST_CS_LIMIT, 0xffff);
1570         if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
1571                 vmcs_writel(GUEST_CS_BASE, 0xf0000);
1572         vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
1573
1574         fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
1575         fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
1576         fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
1577         fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
1578
1579 continue_rmode:
1580         kvm_mmu_reset_context(vcpu);
1581         init_rmode(vcpu->kvm);
1582 }
1583
1584 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1585 {
1586         struct vcpu_vmx *vmx = to_vmx(vcpu);
1587         struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
1588
1589         if (!msr)
1590                 return;
1591
1592         /*
1593          * Force kernel_gs_base reloading before EFER changes, as control
1594          * of this msr depends on is_long_mode().
1595          */
1596         vmx_load_host_state(to_vmx(vcpu));
1597         vcpu->arch.shadow_efer = efer;
1598         if (!msr)
1599                 return;
1600         if (efer & EFER_LMA) {
1601                 vmcs_write32(VM_ENTRY_CONTROLS,
1602                              vmcs_read32(VM_ENTRY_CONTROLS) |
1603                              VM_ENTRY_IA32E_MODE);
1604                 msr->data = efer;
1605         } else {
1606                 vmcs_write32(VM_ENTRY_CONTROLS,
1607                              vmcs_read32(VM_ENTRY_CONTROLS) &
1608                              ~VM_ENTRY_IA32E_MODE);
1609
1610                 msr->data = efer & ~EFER_LME;
1611         }
1612         setup_msrs(vmx);
1613 }
1614
1615 #ifdef CONFIG_X86_64
1616
1617 static void enter_lmode(struct kvm_vcpu *vcpu)
1618 {
1619         u32 guest_tr_ar;
1620
1621         guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
1622         if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
1623                 printk(KERN_DEBUG "%s: tss fixup for long mode\n",
1624                        __func__);
1625                 vmcs_write32(GUEST_TR_AR_BYTES,
1626                              (guest_tr_ar & ~AR_TYPE_MASK)
1627                              | AR_TYPE_BUSY_64_TSS);
1628         }
1629         vcpu->arch.shadow_efer |= EFER_LMA;
1630         vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
1631 }
1632
1633 static void exit_lmode(struct kvm_vcpu *vcpu)
1634 {
1635         vcpu->arch.shadow_efer &= ~EFER_LMA;
1636
1637         vmcs_write32(VM_ENTRY_CONTROLS,
1638                      vmcs_read32(VM_ENTRY_CONTROLS)
1639                      & ~VM_ENTRY_IA32E_MODE);
1640 }
1641
1642 #endif
1643
1644 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
1645 {
1646         vpid_sync_vcpu_all(to_vmx(vcpu));
1647         if (enable_ept)
1648                 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
1649 }
1650
1651 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
1652 {
1653         ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
1654
1655         vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
1656         vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
1657 }
1658
1659 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
1660 {
1661         if (!test_bit(VCPU_EXREG_PDPTR,
1662                       (unsigned long *)&vcpu->arch.regs_dirty))
1663                 return;
1664
1665         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1666                 vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
1667                 vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
1668                 vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]);
1669                 vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]);
1670         }
1671 }
1672
1673 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
1674 {
1675         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1676                 vcpu->arch.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
1677                 vcpu->arch.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
1678                 vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
1679                 vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
1680         }
1681
1682         __set_bit(VCPU_EXREG_PDPTR,
1683                   (unsigned long *)&vcpu->arch.regs_avail);
1684         __set_bit(VCPU_EXREG_PDPTR,
1685                   (unsigned long *)&vcpu->arch.regs_dirty);
1686 }
1687
1688 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
1689
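/*
 * With EPT enabled, CR3 accesses normally need not be intercepted.  While the
 * guest runs with paging disabled, however, GUEST_CR3 points at the EPT
 * identity map (see vmx_set_cr3() below), so CR3 load/store exiting is turned
 * back on; it is cleared again once the guest re-enables paging.
 */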
1690 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
1691                                         unsigned long cr0,
1692                                         struct kvm_vcpu *vcpu)
1693 {
1694         if (!(cr0 & X86_CR0_PG)) {
1695                 /* From paging/starting to nonpaging */
1696                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
1697                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
1698                              (CPU_BASED_CR3_LOAD_EXITING |
1699                               CPU_BASED_CR3_STORE_EXITING));
1700                 vcpu->arch.cr0 = cr0;
1701                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
1702         } else if (!is_paging(vcpu)) {
1703                 /* From nonpaging to paging */
1704                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
1705                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
1706                              ~(CPU_BASED_CR3_LOAD_EXITING |
1707                                CPU_BASED_CR3_STORE_EXITING));
1708                 vcpu->arch.cr0 = cr0;
1709                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
1710         }
1711
1712         if (!(cr0 & X86_CR0_WP))
1713                 *hw_cr0 &= ~X86_CR0_WP;
1714 }
1715
1716 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1717 {
1718         struct vcpu_vmx *vmx = to_vmx(vcpu);
1719         unsigned long hw_cr0;
1720
1721         if (enable_unrestricted_guest)
1722                 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
1723                         | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
1724         else
1725                 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
1726
1727         vmx_fpu_deactivate(vcpu);
1728
1729         if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
1730                 enter_pmode(vcpu);
1731
1732         if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
1733                 enter_rmode(vcpu);
1734
1735 #ifdef CONFIG_X86_64
1736         if (vcpu->arch.shadow_efer & EFER_LME) {
1737                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
1738                         enter_lmode(vcpu);
1739                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
1740                         exit_lmode(vcpu);
1741         }
1742 #endif
1743
1744         if (enable_ept)
1745                 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
1746
1747         vmcs_writel(CR0_READ_SHADOW, cr0);
1748         vmcs_writel(GUEST_CR0, hw_cr0);
1749         vcpu->arch.cr0 = cr0;
1750
1751         if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
1752                 vmx_fpu_activate(vcpu);
1753 }
1754
1755 static u64 construct_eptp(unsigned long root_hpa)
1756 {
1757         u64 eptp;
1758
1759         /* TODO: read these fields from the VMX capability MSRs instead of hard-coding them */
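        /*
         * EPTP layout used here (see the SDM for the full format):
         *   bits 2:0   - EPT paging-structure memory type (VMX_EPT_DEFAULT_MT)
         *   bits 5:3   - page-walk length minus one (VMX_EPT_DEFAULT_GAW)
         *   bits 63:12 - physical address of the EPT PML4 table (root_hpa)
         */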
1760         eptp = VMX_EPT_DEFAULT_MT |
1761                 VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
1762         eptp |= (root_hpa & PAGE_MASK);
1763
1764         return eptp;
1765 }
1766
1767 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1768 {
1769         unsigned long guest_cr3;
1770         u64 eptp;
1771
1772         guest_cr3 = cr3;
1773         if (enable_ept) {
1774                 eptp = construct_eptp(cr3);
1775                 vmcs_write64(EPT_POINTER, eptp);
1776                 guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
1777                         vcpu->kvm->arch.ept_identity_map_addr;
1778                 ept_load_pdptrs(vcpu);
1779         }
1780
1781         vmx_flush_tlb(vcpu);
1782         vmcs_writel(GUEST_CR3, guest_cr3);
1783         if (vcpu->arch.cr0 & X86_CR0_PE)
1784                 vmx_fpu_deactivate(vcpu);
1785 }
1786
1787 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1788 {
1789         unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
1790                     KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
1791
1792         vcpu->arch.cr4 = cr4;
1793         if (enable_ept) {
1794                 if (!is_paging(vcpu)) {
1795                         hw_cr4 &= ~X86_CR4_PAE;
1796                         hw_cr4 |= X86_CR4_PSE;
1797                 } else if (!(cr4 & X86_CR4_PAE)) {
1798                         hw_cr4 &= ~X86_CR4_PAE;
1799                 }
1800         }
1801
1802         vmcs_writel(CR4_READ_SHADOW, cr4);
1803         vmcs_writel(GUEST_CR4, hw_cr4);
1804 }
1805
1806 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1807 {
1808         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1809
1810         return vmcs_readl(sf->base);
1811 }
1812
1813 static void vmx_get_segment(struct kvm_vcpu *vcpu,
1814                             struct kvm_segment *var, int seg)
1815 {
1816         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1817         u32 ar;
1818
1819         var->base = vmcs_readl(sf->base);
1820         var->limit = vmcs_read32(sf->limit);
1821         var->selector = vmcs_read16(sf->selector);
1822         ar = vmcs_read32(sf->ar_bytes);
1823         if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
1824                 ar = 0;
1825         var->type = ar & 15;
1826         var->s = (ar >> 4) & 1;
1827         var->dpl = (ar >> 5) & 3;
1828         var->present = (ar >> 7) & 1;
1829         var->avl = (ar >> 12) & 1;
1830         var->l = (ar >> 13) & 1;
1831         var->db = (ar >> 14) & 1;
1832         var->g = (ar >> 15) & 1;
1833         var->unusable = (ar >> 16) & 1;
1834 }
1835
1836 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
1837 {
1838         if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
1839                 return 0;
1840
1841         if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
1842                 return 3;
1843
1844         return vmcs_read16(GUEST_CS_SELECTOR) & 3;
1845 }
1846
1847 static u32 vmx_segment_access_rights(struct kvm_segment *var)
1848 {
1849         u32 ar;
1850
1851         if (var->unusable)
1852                 ar = 1 << 16;
1853         else {
1854                 ar = var->type & 15;
1855                 ar |= (var->s & 1) << 4;
1856                 ar |= (var->dpl & 3) << 5;
1857                 ar |= (var->present & 1) << 7;
1858                 ar |= (var->avl & 1) << 12;
1859                 ar |= (var->l & 1) << 13;
1860                 ar |= (var->db & 1) << 14;
1861                 ar |= (var->g & 1) << 15;
1862         }
1863         if (ar == 0) /* a 0 value means unusable */
1864                 ar = AR_UNUSABLE_MASK;
1865
1866         return ar;
1867 }
1868
1869 static void vmx_set_segment(struct kvm_vcpu *vcpu,
1870                             struct kvm_segment *var, int seg)
1871 {
1872         struct vcpu_vmx *vmx = to_vmx(vcpu);
1873         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1874         u32 ar;
1875
1876         if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
1877                 vmx->rmode.tr.selector = var->selector;
1878                 vmx->rmode.tr.base = var->base;
1879                 vmx->rmode.tr.limit = var->limit;
1880                 vmx->rmode.tr.ar = vmx_segment_access_rights(var);
1881                 return;
1882         }
1883         vmcs_writel(sf->base, var->base);
1884         vmcs_write32(sf->limit, var->limit);
1885         vmcs_write16(sf->selector, var->selector);
1886         if (vmx->rmode.vm86_active && var->s) {
1887                 /*
1888                  * Hack real-mode segments into vm86 compatibility.
1889                  */
1890                 if (var->base == 0xffff0000 && var->selector == 0xf000)
1891                         vmcs_writel(sf->base, 0xf0000);
1892                 ar = 0xf3;
1893         } else
1894                 ar = vmx_segment_access_rights(var);
1895
1896         /*
1897          * Fix the "Accessed" bit in the AR field of segment registers for
1898          * older qemu binaries.
1899          * The IA-32 architecture specifies that at the time of processor
1900          * reset the "Accessed" bit in the AR field of segment registers is 1,
1901          * but qemu sets it to 0 in its userland code. This causes an
1902          * invalid-guest-state vmexit when "unrestricted guest" mode is on.
1903          * A fix for this cpu_reset setup issue is being pushed into the qemu
1904          * tree; newer qemu binaries with that fix will not need this kvm
1905          * hack.
1906          */
1907         if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
1908                 ar |= 0x1; /* Accessed */
1909
1910         vmcs_write32(sf->ar_bytes, ar);
1911 }
1912
1913 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1914 {
1915         u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
1916
1917         *db = (ar >> 14) & 1;
1918         *l = (ar >> 13) & 1;
1919 }
1920
1921 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1922 {
1923         dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
1924         dt->base = vmcs_readl(GUEST_IDTR_BASE);
1925 }
1926
1927 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1928 {
1929         vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
1930         vmcs_writel(GUEST_IDTR_BASE, dt->base);
1931 }
1932
1933 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1934 {
1935         dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
1936         dt->base = vmcs_readl(GUEST_GDTR_BASE);
1937 }
1938
1939 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1940 {
1941         vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
1942         vmcs_writel(GUEST_GDTR_BASE, dt->base);
1943 }
1944
1945 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
1946 {
1947         struct kvm_segment var;
1948         u32 ar;
1949
1950         vmx_get_segment(vcpu, &var, seg);
1951         ar = vmx_segment_access_rights(&var);
1952
1953         if (var.base != (var.selector << 4))
1954                 return false;
1955         if (var.limit != 0xffff)
1956                 return false;
1957         if (ar != 0xf3)
1958                 return false;
1959
1960         return true;
1961 }
1962
1963 static bool code_segment_valid(struct kvm_vcpu *vcpu)
1964 {
1965         struct kvm_segment cs;
1966         unsigned int cs_rpl;
1967
1968         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
1969         cs_rpl = cs.selector & SELECTOR_RPL_MASK;
1970
1971         if (cs.unusable)
1972                 return false;
1973         if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
1974                 return false;
1975         if (!cs.s)
1976                 return false;
1977         if (cs.type & AR_TYPE_WRITEABLE_MASK) {
1978                 if (cs.dpl > cs_rpl)
1979                         return false;
1980         } else {
1981                 if (cs.dpl != cs_rpl)
1982                         return false;
1983         }
1984         if (!cs.present)
1985                 return false;
1986
1987         /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
1988         return true;
1989 }
1990
1991 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
1992 {
1993         struct kvm_segment ss;
1994         unsigned int ss_rpl;
1995
1996         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
1997         ss_rpl = ss.selector & SELECTOR_RPL_MASK;
1998
1999         if (ss.unusable)
2000                 return true;
2001         if (ss.type != 3 && ss.type != 7)
2002                 return false;
2003         if (!ss.s)
2004                 return false;
2005         if (ss.dpl != ss_rpl) /* DPL != RPL */
2006                 return false;
2007         if (!ss.present)
2008                 return false;
2009
2010         return true;
2011 }
2012
2013 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
2014 {
2015         struct kvm_segment var;
2016         unsigned int rpl;
2017
2018         vmx_get_segment(vcpu, &var, seg);
2019         rpl = var.selector & SELECTOR_RPL_MASK;
2020
2021         if (var.unusable)
2022                 return true;
2023         if (!var.s)
2024                 return false;
2025         if (!var.present)
2026                 return false;
2027         if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
2028                 if (var.dpl < rpl) /* DPL < RPL */
2029                         return false;
2030         }
2031
2032         /* TODO: Add other members to kvm_segment_field to allow checking for other access
2033          * rights flags
2034          */
2035         return true;
2036 }
2037
2038 static bool tr_valid(struct kvm_vcpu *vcpu)
2039 {
2040         struct kvm_segment tr;
2041
2042         vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
2043
2044         if (tr.unusable)
2045                 return false;
2046         if (tr.selector & SELECTOR_TI_MASK)     /* TI = 1 */
2047                 return false;
2048         if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
2049                 return false;
2050         if (!tr.present)
2051                 return false;
2052
2053         return true;
2054 }
2055
2056 static bool ldtr_valid(struct kvm_vcpu *vcpu)
2057 {
2058         struct kvm_segment ldtr;
2059
2060         vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
2061
2062         if (ldtr.unusable)
2063                 return true;
2064         if (ldtr.selector & SELECTOR_TI_MASK)   /* TI = 1 */
2065                 return false;
2066         if (ldtr.type != 2)
2067                 return false;
2068         if (!ldtr.present)
2069                 return false;
2070
2071         return true;
2072 }
2073
2074 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
2075 {
2076         struct kvm_segment cs, ss;
2077
2078         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
2079         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
2080
2081         return ((cs.selector & SELECTOR_RPL_MASK) ==
2082                  (ss.selector & SELECTOR_RPL_MASK));
2083 }
2084
2085 /*
2086  * Check if guest state is valid. Returns true if valid, false if
2087  * not.
2088  * We assume that registers are always usable.
2089  */
2090 static bool guest_state_valid(struct kvm_vcpu *vcpu)
2091 {
2092         /* real mode guest state checks */
2093         if (!(vcpu->arch.cr0 & X86_CR0_PE)) {
2094                 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
2095                         return false;
2096                 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
2097                         return false;
2098                 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
2099                         return false;
2100                 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
2101                         return false;
2102                 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
2103                         return false;
2104                 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
2105                         return false;
2106         } else {
2107         /* protected mode guest state checks */
2108                 if (!cs_ss_rpl_check(vcpu))
2109                         return false;
2110                 if (!code_segment_valid(vcpu))
2111                         return false;
2112                 if (!stack_segment_valid(vcpu))
2113                         return false;
2114                 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
2115                         return false;
2116                 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
2117                         return false;
2118                 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
2119                         return false;
2120                 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
2121                         return false;
2122                 if (!tr_valid(vcpu))
2123                         return false;
2124                 if (!ldtr_valid(vcpu))
2125                         return false;
2126         }
2127         /* TODO:
2128          * - Add checks on RIP
2129          * - Add checks on RFLAGS
2130          */
2131
2132         return true;
2133 }
2134
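/*
 * Lay out the TSS used for real-mode emulation: three guest pages at
 * rmode_tss_base() are cleared, the I/O-permission-bitmap offset is written
 * into the first page, and the last byte of the TSS is set to 0xff to
 * terminate the I/O bitmap.
 */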
2135 static int init_rmode_tss(struct kvm *kvm)
2136 {
2137         gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
2138         u16 data = 0;
2139         int ret = 0;
2140         int r;
2141
2142         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
2143         if (r < 0)
2144                 goto out;
2145         data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
2146         r = kvm_write_guest_page(kvm, fn++, &data,
2147                         TSS_IOPB_BASE_OFFSET, sizeof(u16));
2148         if (r < 0)
2149                 goto out;
2150         r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
2151         if (r < 0)
2152                 goto out;
2153         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
2154         if (r < 0)
2155                 goto out;
2156         data = ~0;
2157         r = kvm_write_guest_page(kvm, fn, &data,
2158                                  RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
2159                                  sizeof(u8));
2160         if (r < 0)
2161                 goto out;
2162
2163         ret = 1;
2164 out:
2165         return ret;
2166 }
2167
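/*
 * Build the identity-mapped page table used while the guest has paging
 * disabled under EPT: a single page of 1024 4MB PSE entries covering the low
 * 4GB, written at kvm->arch.ept_identity_map_addr (vmx_set_cr3() points
 * GUEST_CR3 here for a non-paging guest).  This is done once per VM.
 */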
2168 static int init_rmode_identity_map(struct kvm *kvm)
2169 {
2170         int i, r, ret;
2171         pfn_t identity_map_pfn;
2172         u32 tmp;
2173
2174         if (!enable_ept)
2175                 return 1;
2176         if (unlikely(!kvm->arch.ept_identity_pagetable)) {
2177                 printk(KERN_ERR "EPT: identity-mapping pagetable "
2178                         "has not been allocated!\n");
2179                 return 0;
2180         }
2181         if (likely(kvm->arch.ept_identity_pagetable_done))
2182                 return 1;
2183         ret = 0;
2184         identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
2185         r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
2186         if (r < 0)
2187                 goto out;
2188         /* Set up identity-mapping pagetable for EPT in real mode */
2189         for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
2190                 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
2191                         _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
2192                 r = kvm_write_guest_page(kvm, identity_map_pfn,
2193                                 &tmp, i * sizeof(tmp), sizeof(tmp));
2194                 if (r < 0)
2195                         goto out;
2196         }
2197         kvm->arch.ept_identity_pagetable_done = true;
2198         ret = 1;
2199 out:
2200         return ret;
2201 }
2202
2203 static void seg_setup(int seg)
2204 {
2205         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
2206         unsigned int ar;
2207
2208         vmcs_write16(sf->selector, 0);
2209         vmcs_writel(sf->base, 0);
2210         vmcs_write32(sf->limit, 0xffff);
2211         if (enable_unrestricted_guest) {
2212                 ar = 0x93;
2213                 if (seg == VCPU_SREG_CS)
2214                         ar |= 0x08; /* code segment */
2215         } else
2216                 ar = 0xf3;
2217
2218         vmcs_write32(sf->ar_bytes, ar);
2219 }
2220
2221 static int alloc_apic_access_page(struct kvm *kvm)
2222 {
2223         struct kvm_userspace_memory_region kvm_userspace_mem;
2224         int r = 0;
2225
2226         down_write(&kvm->slots_lock);
2227         if (kvm->arch.apic_access_page)
2228                 goto out;
2229         kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
2230         kvm_userspace_mem.flags = 0;
2231         kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
2232         kvm_userspace_mem.memory_size = PAGE_SIZE;
2233         r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
2234         if (r)
2235                 goto out;
2236
2237         kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
2238 out:
2239         up_write(&kvm->slots_lock);
2240         return r;
2241 }
2242
2243 static int alloc_identity_pagetable(struct kvm *kvm)
2244 {
2245         struct kvm_userspace_memory_region kvm_userspace_mem;
2246         int r = 0;
2247
2248         down_write(&kvm->slots_lock);
2249         if (kvm->arch.ept_identity_pagetable)
2250                 goto out;
2251         kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
2252         kvm_userspace_mem.flags = 0;
2253         kvm_userspace_mem.guest_phys_addr =
2254                 kvm->arch.ept_identity_map_addr;
2255         kvm_userspace_mem.memory_size = PAGE_SIZE;
2256         r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
2257         if (r)
2258                 goto out;
2259
2260         kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
2261                         kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
2262 out:
2263         up_write(&kvm->slots_lock);
2264         return r;
2265 }
2266
2267 static void allocate_vpid(struct vcpu_vmx *vmx)
2268 {
2269         int vpid;
2270
2271         vmx->vpid = 0;
2272         if (!enable_vpid)
2273                 return;
2274         spin_lock(&vmx_vpid_lock);
2275         vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
2276         if (vpid < VMX_NR_VPIDS) {
2277                 vmx->vpid = vpid;
2278                 __set_bit(vpid, vmx_vpid_bitmap);
2279         }
2280         spin_unlock(&vmx_vpid_lock);
2281 }
2282
2283 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
2284 {
2285         int f = sizeof(unsigned long);
2286
2287         if (!cpu_has_vmx_msr_bitmap())
2288                 return;
2289
2290         /*
2291          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
2292          * have the write-low and read-high bitmap offsets the wrong way round.
2293          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
2294          */
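        /*
         * Bitmap layout within the 4K page, one bit per MSR:
         *   0x000 - read bitmap for MSRs  0x00000000-0x00001fff
         *   0x400 - read bitmap for MSRs  0xc0000000-0xc0001fff
         *   0x800 - write bitmap for MSRs 0x00000000-0x00001fff
         *   0xc00 - write bitmap for MSRs 0xc0000000-0xc0001fff
         */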
2295         if (msr <= 0x1fff) {
2296                 __clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
2297                 __clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
2298         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2299                 msr &= 0x1fff;
2300                 __clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
2301                 __clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
2302         }
2303 }
2304
2305 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
2306 {
2307         if (!longmode_only)
2308                 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
2309         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
2310 }
2311
2312 /*
2313  * Sets up the vmcs for emulated real mode.
2314  */
2315 static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2316 {
2317         u32 host_sysenter_cs, msr_low, msr_high;
2318         u32 junk;
2319         u64 host_pat, tsc_this, tsc_base;
2320         unsigned long a;
2321         struct descriptor_table dt;
2322         int i;
2323         unsigned long kvm_vmx_return;
2324         u32 exec_control;
2325
2326         /* I/O */
2327         vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
2328         vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
2329
2330         if (cpu_has_vmx_msr_bitmap())
2331                 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
2332
2333         vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
2334
2335         /* Control */
2336         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
2337                 vmcs_config.pin_based_exec_ctrl);
2338
2339         exec_control = vmcs_config.cpu_based_exec_ctrl;
2340         if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
2341                 exec_control &= ~CPU_BASED_TPR_SHADOW;
2342 #ifdef CONFIG_X86_64
2343                 exec_control |= CPU_BASED_CR8_STORE_EXITING |
2344                                 CPU_BASED_CR8_LOAD_EXITING;
2345 #endif
2346         }
2347         if (!enable_ept)
2348                 exec_control |= CPU_BASED_CR3_STORE_EXITING |
2349                                 CPU_BASED_CR3_LOAD_EXITING  |
2350                                 CPU_BASED_INVLPG_EXITING;
2351         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
2352
2353         if (cpu_has_secondary_exec_ctrls()) {
2354                 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
2355                 if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
2356                         exec_control &=
2357                                 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
2358                 if (vmx->vpid == 0)
2359                         exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
2360                 if (!enable_ept) {
2361                         exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
2362                         enable_unrestricted_guest = 0;
2363                 }
2364                 if (!enable_unrestricted_guest)
2365                         exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
2366                 if (!ple_gap)
2367                         exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
2368                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
2369         }
2370
2371         if (ple_gap) {
2372                 vmcs_write32(PLE_GAP, ple_gap);
2373                 vmcs_write32(PLE_WINDOW, ple_window);
2374         }
2375
2376         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
2377         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
2378         vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
2379
2380         vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
2381         vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
2382         vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
2383
2384         vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
2385         vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
2386         vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
2387         vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs());    /* 22.2.4 */
2388         vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs());    /* 22.2.4 */
2389         vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
2390 #ifdef CONFIG_X86_64
2391         rdmsrl(MSR_FS_BASE, a);
2392         vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
2393         rdmsrl(MSR_GS_BASE, a);
2394         vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
2395 #else
2396         vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
2397         vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
2398 #endif
2399
2400         vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
2401
2402         kvm_get_idt(&dt);
2403         vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
2404
2405         asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
2406         vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
2407         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
2408         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2409         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
2410
2411         rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
2412         vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
2413         rdmsrl(MSR_IA32_SYSENTER_ESP, a);
2414         vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
2415         rdmsrl(MSR_IA32_SYSENTER_EIP, a);
2416         vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */
2417
2418         if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
2419                 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
2420                 host_pat = msr_low | ((u64) msr_high << 32);
2421                 vmcs_write64(HOST_IA32_PAT, host_pat);
2422         }
2423         if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2424                 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
2425                 host_pat = msr_low | ((u64) msr_high << 32);
2426                 /* Write the default value, following the host PAT */
2427                 vmcs_write64(GUEST_IA32_PAT, host_pat);
2428                 /* Keep arch.pat in sync with GUEST_IA32_PAT */
2429                 vmx->vcpu.arch.pat = host_pat;
2430         }
2431
2432         for (i = 0; i < NR_VMX_MSR; ++i) {
2433                 u32 index = vmx_msr_index[i];
2434                 u32 data_low, data_high;
2435                 int j = vmx->nmsrs;
2436
2437                 if (rdmsr_safe(index, &data_low, &data_high) < 0)
2438                         continue;
2439                 if (wrmsr_safe(index, data_low, data_high) < 0)
2440                         continue;
2441                 vmx->guest_msrs[j].index = i;
2442                 vmx->guest_msrs[j].data = 0;
2443                 vmx->guest_msrs[j].mask = -1ull;
2444                 ++vmx->nmsrs;
2445         }
2446
2447         vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
2448
2449         /* 22.2.1, 20.8.1 */
2450         vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
2451
2452         vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
2453         vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
2454         if (enable_ept)
2455                 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
2456         vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
2457
2458         tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
2459         rdtscll(tsc_this);
2460         if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc)
2461                 tsc_base = tsc_this;
2462
2463         guest_write_tsc(0, tsc_base);
2464
2465         return 0;
2466 }
2467
2468 static int init_rmode(struct kvm *kvm)
2469 {
2470         if (!init_rmode_tss(kvm))
2471                 return 0;
2472         if (!init_rmode_identity_map(kvm))
2473                 return 0;
2474         return 1;
2475 }
2476
2477 static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
2478 {
2479         struct vcpu_vmx *vmx = to_vmx(vcpu);
2480         u64 msr;
2481         int ret, idx;
2482
2483         vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
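        /*
         * init_rmode() writes into guest memory, so the memslots must be
         * pinned by an SRCU read-side critical section (memslots are now
         * protected by kvm->srcu) for the duration of the reset.
         */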
2484         idx = srcu_read_lock(&vcpu->kvm->srcu);
2485         if (!init_rmode(vmx->vcpu.kvm)) {
2486                 ret = -ENOMEM;
2487                 goto out;
2488         }
2489
2490         vmx->rmode.vm86_active = 0;
2491
2492         vmx->soft_vnmi_blocked = 0;
2493
2494         vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
2495         kvm_set_cr8(&vmx->vcpu, 0);
2496         msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
2497         if (kvm_vcpu_is_bsp(&vmx->vcpu))
2498                 msr |= MSR_IA32_APICBASE_BSP;
2499         kvm_set_apic_base(&vmx->vcpu, msr);
2500
2501         fx_init(&vmx->vcpu);
2502
2503         seg_setup(VCPU_SREG_CS);
2504         /*
2505          * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
2506          * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
2507          */
2508         if (kvm_vcpu_is_bsp(&vmx->vcpu)) {
2509                 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
2510                 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
2511         } else {
2512                 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
2513                 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
2514         }
2515
2516         seg_setup(VCPU_SREG_DS);
2517         seg_setup(VCPU_SREG_ES);
2518         seg_setup(VCPU_SREG_FS);
2519         seg_setup(VCPU_SREG_GS);
2520         seg_setup(VCPU_SREG_SS);
2521
2522         vmcs_write16(GUEST_TR_SELECTOR, 0);
2523         vmcs_writel(GUEST_TR_BASE, 0);
2524         vmcs_write32(GUEST_TR_LIMIT, 0xffff);
2525         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
2526
2527         vmcs_write16(GUEST_LDTR_SELECTOR, 0);
2528         vmcs_writel(GUEST_LDTR_BASE, 0);
2529         vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
2530         vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
2531
2532         vmcs_write32(GUEST_SYSENTER_CS, 0);
2533         vmcs_writel(GUEST_SYSENTER_ESP, 0);
2534         vmcs_writel(GUEST_SYSENTER_EIP, 0);
2535
2536         vmcs_writel(GUEST_RFLAGS, 0x02);
2537         if (kvm_vcpu_is_bsp(&vmx->vcpu))
2538                 kvm_rip_write(vcpu, 0xfff0);
2539         else
2540                 kvm_rip_write(vcpu, 0);
2541         kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
2542
2543         vmcs_writel(GUEST_DR7, 0x400);
2544
2545         vmcs_writel(GUEST_GDTR_BASE, 0);
2546         vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
2547
2548         vmcs_writel(GUEST_IDTR_BASE, 0);
2549         vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
2550
2551         vmcs_write32(GUEST_ACTIVITY_STATE, 0);
2552         vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
2553         vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
2554
2555         /* Special registers */
2556         vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
2557
2558         setup_msrs(vmx);
2559
2560         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
2561
2562         if (cpu_has_vmx_tpr_shadow()) {
2563                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
2564                 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
2565                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
2566                                 page_to_phys(vmx->vcpu.arch.apic->regs_page));
2567                 vmcs_write32(TPR_THRESHOLD, 0);
2568         }
2569
2570         if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
2571                 vmcs_write64(APIC_ACCESS_ADDR,
2572                              page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
2573
2574         if (vmx->vpid != 0)
2575                 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2576
2577         vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
2578         vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
2579         vmx_set_cr4(&vmx->vcpu, 0);
2580         vmx_set_efer(&vmx->vcpu, 0);
2581         vmx_fpu_activate(&vmx->vcpu);
2582         update_exception_bitmap(&vmx->vcpu);
2583
2584         vpid_sync_vcpu_all(vmx);
2585
2586         ret = 0;
2587
2588         /* HACK: Don't enable emulation on guest boot/reset */
2589         vmx->emulation_required = 0;
2590
2591 out:
2592         srcu_read_unlock(&vcpu->kvm->srcu, idx);
2593         return ret;
2594 }
2595
2596 static void enable_irq_window(struct kvm_vcpu *vcpu)
2597 {
2598         u32 cpu_based_vm_exec_control;
2599
2600         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2601         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
2602         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2603 }
2604
2605 static void enable_nmi_window(struct kvm_vcpu *vcpu)
2606 {
2607         u32 cpu_based_vm_exec_control;
2608
2609         if (!cpu_has_virtual_nmis()) {
2610                 enable_irq_window(vcpu);
2611                 return;
2612         }
2613
2614         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2615         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
2616         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2617 }
2618
2619 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
2620 {
2621         struct vcpu_vmx *vmx = to_vmx(vcpu);
2622         uint32_t intr;
2623         int irq = vcpu->arch.interrupt.nr;
2624
2625         trace_kvm_inj_virq(irq);
2626
2627         ++vcpu->stat.irq_injections;
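        /*
         * vm86 injection trick: the event is injected as a software interrupt
         * with VM_ENTRY_INSTRUCTION_LEN = 1 and RIP wound back by one byte,
         * so the return address pushed for the handler is the original RIP.
         * The rmode.irq fields remember the pending injection so it can be
         * unwound later if the VM entry does not complete it.
         */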
2628         if (vmx->rmode.vm86_active) {
2629                 vmx->rmode.irq.pending = true;
2630                 vmx->rmode.irq.vector = irq;
2631                 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
2632                 if (vcpu->arch.interrupt.soft)
2633                         vmx->rmode.irq.rip +=
2634                                 vmx->vcpu.arch.event_exit_inst_len;
2635                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2636                              irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
2637                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
2638                 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
2639                 return;
2640         }
2641         intr = irq | INTR_INFO_VALID_MASK;
2642         if (vcpu->arch.interrupt.soft) {
2643                 intr |= INTR_TYPE_SOFT_INTR;
2644                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2645                              vmx->vcpu.arch.event_exit_inst_len);
2646         } else
2647                 intr |= INTR_TYPE_EXT_INTR;
2648         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
2649 }
2650
2651 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
2652 {
2653         struct vcpu_vmx *vmx = to_vmx(vcpu);
2654
2655         if (!cpu_has_virtual_nmis()) {
2656                 /*
2657                  * Tracking the NMI-blocked state in software is built upon
2658                  * finding the next open IRQ window. This, in turn, depends on
2659                  * well-behaving guests: They have to keep IRQs disabled at
2660                  * least as long as the NMI handler runs. Otherwise we may
2661                  * cause NMI nesting, maybe breaking the guest. But as this is
2662                  * highly unlikely, we can live with the residual risk.
2663                  */
2664                 vmx->soft_vnmi_blocked = 1;
2665                 vmx->vnmi_blocked_time = 0;
2666         }
2667
2668         ++vcpu->stat.nmi_injections;
2669         if (vmx->rmode.vm86_active) {
2670                 vmx->rmode.irq.pending = true;
2671                 vmx->rmode.irq.vector = NMI_VECTOR;
2672                 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
2673                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2674                              NMI_VECTOR | INTR_TYPE_SOFT_INTR |
2675                              INTR_INFO_VALID_MASK);
2676                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
2677                 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
2678                 return;
2679         }
2680         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2681                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
2682 }
2683
2684 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
2685 {
2686         if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
2687                 return 0;
2688
2689         return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
2690                         (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
2691                                 GUEST_INTR_STATE_NMI));
2692 }
2693
2694 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
2695 {
2696         if (!cpu_has_virtual_nmis())
2697                 return to_vmx(vcpu)->soft_vnmi_blocked;
2698         else
2699                 return !!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
2700                           GUEST_INTR_STATE_NMI);
2701 }
2702
2703 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
2704 {
2705         struct vcpu_vmx *vmx = to_vmx(vcpu);
2706
2707         if (!cpu_has_virtual_nmis()) {
2708                 if (vmx->soft_vnmi_blocked != masked) {
2709                         vmx->soft_vnmi_blocked = masked;
2710                         vmx->vnmi_blocked_time = 0;
2711                 }
2712         } else {
2713                 if (masked)
2714                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
2715                                       GUEST_INTR_STATE_NMI);
2716                 else
2717                         vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
2718                                         GUEST_INTR_STATE_NMI);
2719         }
2720 }
2721
2722 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
2723 {
2724         return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
2725                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
2726                         (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
2727 }
2728
2729 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
2730 {
2731         int ret;
2732         struct kvm_userspace_memory_region tss_mem = {
2733                 .slot = TSS_PRIVATE_MEMSLOT,
2734                 .guest_phys_addr = addr,
2735                 .memory_size = PAGE_SIZE * 3,
2736                 .flags = 0,
2737         };
2738
2739         ret = kvm_set_memory_region(kvm, &tss_mem, 0);
2740         if (ret)
2741                 return ret;
2742         kvm->arch.tss_addr = addr;
2743         return 0;
2744 }
2745
2746 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
2747                                   int vec, u32 err_code)
2748 {
2749         /*
2750          * An instruction with the address-size override prefix (opcode 0x67)
2751          * causes an #SS fault with error code 0 in VM86 mode.
2752          */
2753         if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
2754                 if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE)
2755                         return 1;
2756         /*
2757          * Forward all other exceptions that are valid in real mode.
2758          * FIXME: Breaks guest debugging in real mode, needs to be fixed with
2759          *        the required debugging infrastructure rework.
2760          */
2761         switch (vec) {
2762         case DB_VECTOR:
2763                 if (vcpu->guest_debug &
2764                     (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
2765                         return 0;
2766                 kvm_queue_exception(vcpu, vec);
2767                 return 1;
2768         case BP_VECTOR:
2769                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2770                         return 0;
2771                 /* fall through */
2772         case DE_VECTOR:
2773         case OF_VECTOR:
2774         case BR_VECTOR:
2775         case UD_VECTOR:
2776         case DF_VECTOR:
2777         case SS_VECTOR:
2778         case GP_VECTOR:
2779         case MF_VECTOR:
2780                 kvm_queue_exception(vcpu, vec);
2781                 return 1;
2782         }
2783         return 0;
2784 }
2785
2786 /*
2787  * Trigger machine check on the host. We assume all the MSRs are already set up
2788  * by the CPU and that we still run on the same CPU as the MCE occurred on.
2789  * We pass a fake environment to the machine check handler because we want
2790  * the guest to be always treated like user space, no matter what context
2791  * it used internally.
2792  */
2793 static void kvm_machine_check(void)
2794 {
2795 #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
2796         struct pt_regs regs = {
2797                 .cs = 3, /* Fake ring 3 no matter what the guest ran on */
2798                 .flags = X86_EFLAGS_IF,
2799         };
2800
2801         do_machine_check(&regs, 0);
2802 #endif
2803 }
2804
2805 static int handle_machine_check(struct kvm_vcpu *vcpu)
2806 {
2807         /* already handled by vcpu_run */
2808         return 1;
2809 }
2810
2811 static int handle_exception(struct kvm_vcpu *vcpu)
2812 {
2813         struct vcpu_vmx *vmx = to_vmx(vcpu);
2814         struct kvm_run *kvm_run = vcpu->run;
2815         u32 intr_info, ex_no, error_code;
2816         unsigned long cr2, rip, dr6;
2817         u32 vect_info;
2818         enum emulation_result er;
2819
2820         vect_info = vmx->idt_vectoring_info;
2821         intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
2822
2823         if (is_machine_check(intr_info))
2824                 return handle_machine_check(vcpu);
2825
2826         if ((vect_info & VECTORING_INFO_VALID_MASK) &&
2827             !is_page_fault(intr_info)) {
2828                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2829                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
2830                 vcpu->run->internal.ndata = 2;
2831                 vcpu->run->internal.data[0] = vect_info;
2832                 vcpu->run->internal.data[1] = intr_info;
2833                 return 0;
2834         }
2835
2836         if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
2837                 return 1;  /* already handled by vmx_vcpu_run() */
2838
2839         if (is_no_device(intr_info)) {
2840                 vmx_fpu_activate(vcpu);
2841                 return 1;
2842         }
2843
2844         if (is_invalid_opcode(intr_info)) {
2845                 er = emulate_instruction(vcpu, 0, 0, EMULTYPE_TRAP_UD);
2846                 if (er != EMULATE_DONE)
2847                         kvm_queue_exception(vcpu, UD_VECTOR);
2848                 return 1;
2849         }
2850
2851         error_code = 0;
2852         rip = kvm_rip_read(vcpu);
2853         if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
2854                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
2855         if (is_page_fault(intr_info)) {
2856                 /* EPT won't cause page fault directly */
2857                 if (enable_ept)
2858                         BUG();
2859                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
2860                 trace_kvm_page_fault(cr2, error_code);
2861
2862                 if (kvm_event_needs_reinjection(vcpu))
2863                         kvm_mmu_unprotect_page_virt(vcpu, cr2);
2864                 return kvm_mmu_page_fault(vcpu, cr2, error_code);
2865         }
2866
2867         if (vmx->rmode.vm86_active &&
2868             handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
2869                                                                 error_code)) {
2870                 if (vcpu->arch.halt_request) {
2871                         vcpu->arch.halt_request = 0;
2872                         return kvm_emulate_halt(vcpu);
2873                 }
2874                 return 1;
2875         }
2876
2877         ex_no = intr_info & INTR_INFO_VECTOR_MASK;
2878         switch (ex_no) {
2879         case DB_VECTOR:
2880                 dr6 = vmcs_readl(EXIT_QUALIFICATION);
2881                 if (!(vcpu->guest_debug &
2882                       (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
2883                         vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
2884                         kvm_queue_exception(vcpu, DB_VECTOR);
2885                         return 1;
2886                 }
2887                 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
2888                 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
2889                 /* fall through */
2890         case BP_VECTOR:
2891                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2892                 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
2893                 kvm_run->debug.arch.exception = ex_no;
2894                 break;
2895         default:
2896                 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
2897                 kvm_run->ex.exception = ex_no;
2898                 kvm_run->ex.error_code = error_code;
2899                 break;
2900         }
2901         return 0;
2902 }
2903
2904 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
2905 {
2906         ++vcpu->stat.irq_exits;
2907         return 1;
2908 }
2909
2910 static int handle_triple_fault(struct kvm_vcpu *vcpu)
2911 {
2912         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
2913         return 0;
2914 }
2915
2916 static int handle_io(struct kvm_vcpu *vcpu)
2917 {
2918         unsigned long exit_qualification;
2919         int size, in, string;
2920         unsigned port;
2921
2922         ++vcpu->stat.io_exits;
2923         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
2924         string = (exit_qualification & 16) != 0;
2925
2926         if (string) {
2927                 if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO)
2928                         return 0;
2929                 return 1;
2930         }
2931
2932         size = (exit_qualification & 7) + 1;
2933         in = (exit_qualification & 8) != 0;
2934         port = exit_qualification >> 16;
2935
2936         skip_emulated_instruction(vcpu);
2937         return kvm_emulate_pio(vcpu, in, size, port);
2938 }
2939
2940 static void
2941 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
2942 {
2943         /*
2944          * Patch in the VMCALL instruction:
2945          */
2946         hypercall[0] = 0x0f;
2947         hypercall[1] = 0x01;
2948         hypercall[2] = 0xc1;
2949 }
2950
2951 static int handle_cr(struct kvm_vcpu *vcpu)
2952 {
2953         unsigned long exit_qualification, val;
2954         int cr;
2955         int reg;
2956
2957         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
2958         cr = exit_qualification & 15;
2959         reg = (exit_qualification >> 8) & 15;
2960         switch ((exit_qualification >> 4) & 3) {
2961         case 0: /* mov to cr */
2962                 val = kvm_register_read(vcpu, reg);
2963                 trace_kvm_cr_write(cr, val);
2964                 switch (cr) {
2965                 case 0:
2966                         kvm_set_cr0(vcpu, val);
2967                         skip_emulated_instruction(vcpu);
2968                         return 1;
2969                 case 3:
2970                         kvm_set_cr3(vcpu, val);
2971                         skip_emulated_instruction(vcpu);
2972                         return 1;
2973                 case 4:
2974                         kvm_set_cr4(vcpu, val);
2975                         skip_emulated_instruction(vcpu);
2976                         return 1;
2977                 case 8: {
2978                                 u8 cr8_prev = kvm_get_cr8(vcpu);
2979                                 u8 cr8 = kvm_register_read(vcpu, reg);
2980                                 kvm_set_cr8(vcpu, cr8);
2981                                 skip_emulated_instruction(vcpu);
2982                                 if (irqchip_in_kernel(vcpu->kvm))
2983                                         return 1;
2984                                 if (cr8_prev <= cr8)
2985                                         return 1;
2986                                 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
2987                                 return 0;
2988                         }
2989                 }
2990                 break;
2991         case 2: /* clts */
2992                 vmx_fpu_deactivate(vcpu);
2993                 vcpu->arch.cr0 &= ~X86_CR0_TS;
2994                 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
2995                 vmx_fpu_activate(vcpu);
2996                 skip_emulated_instruction(vcpu);
2997                 return 1;
2998         case 1: /*mov from cr*/
2999                 switch (cr) {
3000                 case 3:
3001                         kvm_register_write(vcpu, reg, vcpu->arch.cr3);
3002                         trace_kvm_cr_read(cr, vcpu->arch.cr3);
3003                         skip_emulated_instruction(vcpu);
3004                         return 1;
3005                 case 8:
3006                         val = kvm_get_cr8(vcpu);
3007                         kvm_register_write(vcpu, reg, val);
3008                         trace_kvm_cr_read(cr, val);
3009                         skip_emulated_instruction(vcpu);
3010                         return 1;
3011                 }
3012                 break;
3013         case 3: /* lmsw */
3014                 kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
3015
3016                 skip_emulated_instruction(vcpu);
3017                 return 1;
3018         default:
3019                 break;
3020         }
3021         vcpu->run->exit_reason = 0;
3022         pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
3023                (int)(exit_qualification >> 4) & 3, cr);
3024         return 0;
3025 }
3026
3027 static int handle_dr(struct kvm_vcpu *vcpu)
3028 {
3029         unsigned long exit_qualification;
3030         unsigned long val;
3031         int dr, reg;
3032
3033         if (!kvm_require_cpl(vcpu, 0))
3034                 return 1;
3035         dr = vmcs_readl(GUEST_DR7);
3036         if (dr & DR7_GD) {
3037                 /*
3038                  * As the vm-exit takes precedence over the debug trap, we
3039                  * need to emulate the latter, either for the host or the
3040                  * guest debugging itself.
3041                  */
3042                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
3043                         vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
3044                         vcpu->run->debug.arch.dr7 = dr;
3045                         vcpu->run->debug.arch.pc =
3046                                 vmcs_readl(GUEST_CS_BASE) +
3047                                 vmcs_readl(GUEST_RIP);
3048                         vcpu->run->debug.arch.exception = DB_VECTOR;
3049                         vcpu->run->exit_reason = KVM_EXIT_DEBUG;
3050                         return 0;
3051                 } else {
3052                         vcpu->arch.dr7 &= ~DR7_GD;
3053                         vcpu->arch.dr6 |= DR6_BD;
3054                         vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
3055                         kvm_queue_exception(vcpu, DB_VECTOR);
3056                         return 1;
3057                 }
3058         }
3059
3060         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3061         dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
3062         reg = DEBUG_REG_ACCESS_REG(exit_qualification);
3063         if (exit_qualification & TYPE_MOV_FROM_DR) {
3064                 switch (dr) {
3065                 case 0 ... 3:
3066                         val = vcpu->arch.db[dr];
3067                         break;
3068                 case 6:
3069                         val = vcpu->arch.dr6;
3070                         break;
3071                 case 7:
3072                         val = vcpu->arch.dr7;
3073                         break;
3074                 default:
3075                         val = 0;
3076                 }
3077                 kvm_register_write(vcpu, reg, val);
3078         } else {
3079                 val = vcpu->arch.regs[reg];
3080                 switch (dr) {
3081                 case 0 ... 3:
3082                         vcpu->arch.db[dr] = val;
3083                         if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
3084                                 vcpu->arch.eff_db[dr] = val;
3085                         break;
3086                 case 4 ... 5:
3087                         if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
3088                                 kvm_queue_exception(vcpu, UD_VECTOR);
3089                         break;
3090                 case 6:
3091                         if (val & 0xffffffff00000000ULL) {
3092                                 kvm_queue_exception(vcpu, GP_VECTOR);
3093                                 break;
3094                         }
3095                         vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
3096                         break;
3097                 case 7:
3098                         if (val & 0xffffffff00000000ULL) {
3099                                 kvm_queue_exception(vcpu, GP_VECTOR);
3100                                 break;
3101                         }
3102                         vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
3103                         if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
3104                                 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
3105                                 vcpu->arch.switch_db_regs =
3106                                         (val & DR7_BP_EN_MASK);
3107                         }
3108                         break;
3109                 }
3110         }
3111         skip_emulated_instruction(vcpu);
3112         return 1;
3113 }
3114
3115 static int handle_cpuid(struct kvm_vcpu *vcpu)
3116 {
3117         kvm_emulate_cpuid(vcpu);
3118         return 1;
3119 }
3120
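/*
 * RDMSR exit: ECX selects the MSR and the 64-bit result is returned to the
 * guest split across EDX:EAX.  handle_wrmsr() below performs the reverse
 * operation for WRMSR.
 */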
3121 static int handle_rdmsr(struct kvm_vcpu *vcpu)
3122 {
3123         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
3124         u64 data;
3125
3126         if (vmx_get_msr(vcpu, ecx, &data)) {
3127                 kvm_inject_gp(vcpu, 0);
3128                 return 1;
3129         }
3130
3131         trace_kvm_msr_read(ecx, data);
3132
3133         /* FIXME: handling of bits 32:63 of rax, rdx */
3134         vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
3135         vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
3136         skip_emulated_instruction(vcpu);
3137         return 1;
3138 }
3139
3140 static int handle_wrmsr(struct kvm_vcpu *vcpu)
3141 {
3142         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
3143         u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
3144                 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
3145
3146         trace_kvm_msr_write(ecx, data);
3147
3148         if (vmx_set_msr(vcpu, ecx, data) != 0) {
3149                 kvm_inject_gp(vcpu, 0);
3150                 return 1;
3151         }
3152
3153         skip_emulated_instruction(vcpu);
3154         return 1;
3155 }
3156
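/*
 * The guest lowered its TPR below the threshold programmed via
 * update_cr8_intercept().  Nothing to do in the handler itself; returning 1
 * resumes the normal exit path, so pending interrupt injection is
 * re-evaluated before the next VM entry.
 */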
3157 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
3158 {
3159         return 1;
3160 }
3161
3162 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
3163 {
3164         u32 cpu_based_vm_exec_control;
3165
3166         /* clear pending irq */
3167         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3168         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
3169         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3170
3171         ++vcpu->stat.irq_window_exits;
3172
3173         /*
3174          * If userspace is waiting to inject interrupts, exit as soon as
3175          * possible.
3176          */
3177         if (!irqchip_in_kernel(vcpu->kvm) &&
3178             vcpu->run->request_interrupt_window &&
3179             !kvm_cpu_has_interrupt(vcpu)) {
3180                 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
3181                 return 0;
3182         }
3183         return 1;
3184 }
3185
3186 static int handle_halt(struct kvm_vcpu *vcpu)
3187 {
3188         skip_emulated_instruction(vcpu);
3189         return kvm_emulate_halt(vcpu);
3190 }
3191
3192 static int handle_vmcall(struct kvm_vcpu *vcpu)
3193 {
3194         skip_emulated_instruction(vcpu);
3195         kvm_emulate_hypercall(vcpu);
3196         return 1;
3197 }
3198
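/*
 * Nested VMX is not supported, so any VMX instruction executed by the
 * guest is reflected back as an undefined-opcode exception.
 */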
3199 static int handle_vmx_insn(struct kvm_vcpu *vcpu)
3200 {
3201         kvm_queue_exception(vcpu, UD_VECTOR);
3202         return 1;
3203 }
3204
3205 static int handle_invlpg(struct kvm_vcpu *vcpu)
3206 {
3207         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3208
3209         kvm_mmu_invlpg(vcpu, exit_qualification);
3210         skip_emulated_instruction(vcpu);
3211         return 1;
3212 }
3213
3214 static int handle_wbinvd(struct kvm_vcpu *vcpu)
3215 {
3216         skip_emulated_instruction(vcpu);
3217         /* TODO: Add support for VT-d/pass-through device */
3218         return 1;
3219 }
3220
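/*
 * An access to the APIC-access page that the CPU could not virtualize on
 * its own causes this exit; emulate the faulting instruction so that the
 * access is applied to the virtual local APIC instead.
 */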
3221 static int handle_apic_access(struct kvm_vcpu *vcpu)
3222 {
3223         unsigned long exit_qualification;
3224         enum emulation_result er;
3225         unsigned long offset;
3226
3227         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3228         offset = exit_qualification & 0xffful;
3229
3230         er = emulate_instruction(vcpu, 0, 0, 0);
3231
3232         if (er !=  EMULATE_DONE) {
3233                 printk(KERN_ERR
3234                        "Failed to handle apic access vmexit! Offset is 0x%lx\n",
3235                        offset);
3236                 return -ENOEXEC;
3237         }
3238         return 1;
3239 }
3240
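/*
 * Hardware task switches are not virtualized by VMX, so they trap here.
 * Clean up any event that was being delivered through the task gate, then
 * hand the switch itself to the common emulation in kvm_task_switch().
 */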
3241 static int handle_task_switch(struct kvm_vcpu *vcpu)
3242 {
3243         struct vcpu_vmx *vmx = to_vmx(vcpu);
3244         unsigned long exit_qualification;
3245         u16 tss_selector;
3246         int reason, type, idt_v;
3247
3248         idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
3249         type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
3250
3251         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3252
3253         reason = (u32)exit_qualification >> 30;
3254         if (reason == TASK_SWITCH_GATE && idt_v) {
3255                 switch (type) {
3256                 case INTR_TYPE_NMI_INTR:
3257                         vcpu->arch.nmi_injected = false;
3258                         if (cpu_has_virtual_nmis())
3259                                 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
3260                                               GUEST_INTR_STATE_NMI);
3261                         break;
3262                 case INTR_TYPE_EXT_INTR:
3263                 case INTR_TYPE_SOFT_INTR:
3264                         kvm_clear_interrupt_queue(vcpu);
3265                         break;
3266                 case INTR_TYPE_HARD_EXCEPTION:
3267                 case INTR_TYPE_SOFT_EXCEPTION:
3268                         kvm_clear_exception_queue(vcpu);
3269                         break;
3270                 default:
3271                         break;
3272                 }
3273         }
3274         tss_selector = exit_qualification;
3275
3276         if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
3277                        type != INTR_TYPE_EXT_INTR &&
3278                        type != INTR_TYPE_NMI_INTR))
3279                 skip_emulated_instruction(vcpu);
3280
3281         if (!kvm_task_switch(vcpu, tss_selector, reason))
3282                 return 0;
3283
3284         /* clear all local breakpoint enable flags */
3285                 vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x55);
3286
3287         /*
3288          * TODO: What about debug traps on tss switch?
3289          *       Are we supposed to inject them and update dr6?
3290          */
3291
3292         return 1;
3293 }
3294
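/*
 * An EPT violation is the equivalent of a page fault against the EPT
 * tables.  Validate the exit qualification, then let the MMU handle the
 * fault on the reported guest-physical address.
 */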
3295 static int handle_ept_violation(struct kvm_vcpu *vcpu)
3296 {
3297         unsigned long exit_qualification;
3298         gpa_t gpa;
3299         int gla_validity;
3300
3301         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3302
3303         if (exit_qualification & (1 << 6)) {
3304                 printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
3305                 return -EINVAL;
3306         }
3307
3308         gla_validity = (exit_qualification >> 7) & 0x3;
3309         if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
3310                 printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
3311                 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
3312                         (unsigned long)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
3313                         vmcs_readl(GUEST_LINEAR_ADDRESS));
3314                 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
3315                         (unsigned long)exit_qualification);
3316                 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
3317                 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
3318                 return 0;
3319         }
3320
3321         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
3322         trace_kvm_page_fault(gpa, exit_qualification);
3323         return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
3324 }
3325
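/*
 * Build the mask of bits that must be zero in an EPT entry at the given
 * level: everything above the physical address width up to bit 51, plus
 * the level-dependent reserved bits defined by the SDM.
 */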
3326 static u64 ept_rsvd_mask(u64 spte, int level)
3327 {
3328         int i;
3329         u64 mask = 0;
3330
3331         for (i = 51; i >= boot_cpu_data.x86_phys_bits; i--)
3332                 mask |= (1ULL << i);
3333
3334         if (level > 2)
3335                 /* bits 7:3 reserved */
3336                 mask |= 0xf8;
3337         else if (level == 2) {
3338                 if (spte & (1ULL << 7))
3339                         /* 2MB page, bits 20:12 reserved */
3340                         mask |= 0x1ff000;
3341                 else
3342                         /* bits 6:3 reserved */
3343                         mask |= 0x78;
3344         }
3345
3346         return mask;
3347 }
3348
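/*
 * Sanity-check one shadow EPT entry against the documented causes of an
 * EPT misconfiguration: illegal read/write/execute combinations, reserved
 * bits, and invalid memory types in leaf entries.
 */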
3349 static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
3350                                        int level)
3351 {
3352         printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
3353
3354         /* 010b (write-only) */
3355         WARN_ON((spte & 0x7) == 0x2);
3356
3357         /* 110b (write/execute) */
3358         WARN_ON((spte & 0x7) == 0x6);
3359
3360         /* 100b (execute-only) and value not supported by logical processor */
3361         if (!cpu_has_vmx_ept_execute_only())
3362                 WARN_ON((spte & 0x7) == 0x4);
3363
3364         /* not 000b */
3365         if ((spte & 0x7)) {
3366                 u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
3367
3368                 if (rsvd_bits != 0) {
3369                         printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
3370                                          __func__, rsvd_bits);
3371                         WARN_ON(1);
3372                 }
3373
3374                 if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
3375                         u64 ept_mem_type = (spte & 0x38) >> 3;
3376
3377                         if (ept_mem_type == 2 || ept_mem_type == 3 ||
3378                             ept_mem_type == 7) {
3379                                 printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
3380                                                 __func__, ept_mem_type);
3381                                 WARN_ON(1);
3382                         }
3383                 }
3384         }
3385 }
3386
3387 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
3388 {
3389         u64 sptes[4];
3390         int nr_sptes, i;
3391         gpa_t gpa;
3392
3393         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
3394
3395         printk(KERN_ERR "EPT: Misconfiguration.\n");
3396         printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
3397
3398         nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
3399
3400         for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
3401                 ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
3402
3403         vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
3404         vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
3405
3406         return 0;
3407 }
3408
3409 static int handle_nmi_window(struct kvm_vcpu *vcpu)
3410 {
3411         u32 cpu_based_vm_exec_control;
3412
3413         /* clear pending NMI */
3414         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3415         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
3416         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3417         ++vcpu->stat.nmi_window_exits;
3418
3419         return 1;
3420 }
3421
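/*
 * The guest is in a state that VMX cannot execute directly (see
 * emulate_invalid_guest_state).  Emulate one instruction at a time until
 * the state becomes valid again, bailing out to userspace for MMIO or on
 * emulation failure.
 */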
3422 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
3423 {
3424         struct vcpu_vmx *vmx = to_vmx(vcpu);
3425         enum emulation_result err = EMULATE_DONE;
3426         int ret = 1;
3427
3428         while (!guest_state_valid(vcpu)) {
3429                 err = emulate_instruction(vcpu, 0, 0, 0);
3430
3431                 if (err == EMULATE_DO_MMIO) {
3432                         ret = 0;
3433                         goto out;
3434                 }
3435
3436                 if (err != EMULATE_DONE) {
3437                         kvm_report_emulation_failure(vcpu, "emulation failure");
3438                         vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3439                         vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3440                         vcpu->run->internal.ndata = 0;
3441                         ret = 0;
3442                         goto out;
3443                 }
3444
3445                 if (signal_pending(current))
3446                         goto out;
3447                 if (need_resched())
3448                         schedule();
3449         }
3450
3451         vmx->emulation_required = 0;
3452 out:
3453         return ret;
3454 }
3455
3456 /*
3457  * The vcpu is busy-waiting on a spinlock. We do not enable plain PAUSE
3458  * exiting, so we only get here on CPUs with Pause-Loop Exiting.
3459  */
3460 static int handle_pause(struct kvm_vcpu *vcpu)
3461 {
3462         skip_emulated_instruction(vcpu);
3463         kvm_vcpu_on_spin(vcpu);
3464
3465         return 1;
3466 }
3467
3468 static int handle_invalid_op(struct kvm_vcpu *vcpu)
3469 {
3470         kvm_queue_exception(vcpu, UD_VECTOR);
3471         return 1;
3472 }
3473
3474 /*
3475  * The exit handlers return 1 if the exit was handled fully and guest execution
3476  * may resume.  Otherwise they fill in vcpu->run to indicate what needs to be
3477  * done by userspace and return 0.
3478  */
3479 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
3480         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
3481         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
3482         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
3483         [EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
3484         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
3485         [EXIT_REASON_CR_ACCESS]               = handle_cr,
3486         [EXIT_REASON_DR_ACCESS]               = handle_dr,
3487         [EXIT_REASON_CPUID]                   = handle_cpuid,
3488         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
3489         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
3490         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
3491         [EXIT_REASON_HLT]                     = handle_halt,
3492         [EXIT_REASON_INVLPG]                  = handle_invlpg,
3493         [EXIT_REASON_VMCALL]                  = handle_vmcall,
3494         [EXIT_REASON_VMCLEAR]                 = handle_vmx_insn,
3495         [EXIT_REASON_VMLAUNCH]                = handle_vmx_insn,
3496         [EXIT_REASON_VMPTRLD]                 = handle_vmx_insn,
3497         [EXIT_REASON_VMPTRST]                 = handle_vmx_insn,
3498         [EXIT_REASON_VMREAD]                  = handle_vmx_insn,
3499         [EXIT_REASON_VMRESUME]                = handle_vmx_insn,
3500         [EXIT_REASON_VMWRITE]                 = handle_vmx_insn,
3501         [EXIT_REASON_VMOFF]                   = handle_vmx_insn,
3502         [EXIT_REASON_VMON]                    = handle_vmx_insn,
3503         [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
3504         [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
3505         [EXIT_REASON_WBINVD]                  = handle_wbinvd,
3506         [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
3507         [EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
3508         [EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
3509         [EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
3510         [EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
3511         [EXIT_REASON_MWAIT_INSTRUCTION]       = handle_invalid_op,
3512         [EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
3513 };
3514
3515 static const int kvm_vmx_max_exit_handlers =
3516         ARRAY_SIZE(kvm_vmx_exit_handlers);
3517
3518 /*
3519  * The guest has exited.  See if we can fix it or if we need userspace
3520  * assistance.
3521  */
3522 static int vmx_handle_exit(struct kvm_vcpu *vcpu)
3523 {
3524         struct vcpu_vmx *vmx = to_vmx(vcpu);
3525         u32 exit_reason = vmx->exit_reason;
3526         u32 vectoring_info = vmx->idt_vectoring_info;
3527
3528         trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
3529
3530         /* If guest state is invalid, start emulating */
3531         if (vmx->emulation_required && emulate_invalid_guest_state)
3532                 return handle_invalid_guest_state(vcpu);
3533
3534         /* Accesses to CR3 don't cause a VM exit when EPT is enabled and the
3535          * guest is paging, so we need to sync with the guest's real CR3. */
3536         if (enable_ept && is_paging(vcpu))
3537                 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3538
3539         if (unlikely(vmx->fail)) {
3540                 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3541                 vcpu->run->fail_entry.hardware_entry_failure_reason
3542                         = vmcs_read32(VM_INSTRUCTION_ERROR);
3543                 return 0;
3544         }
3545
3546         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
3547                         (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
3548                         exit_reason != EXIT_REASON_EPT_VIOLATION &&
3549                         exit_reason != EXIT_REASON_TASK_SWITCH))
3550                 printk(KERN_WARNING "%s: unexpected, valid vectoring info "
3551                        "(0x%x) and exit reason is 0x%x\n",
3552                        __func__, vectoring_info, exit_reason);
3553
3554         if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
3555                 if (vmx_interrupt_allowed(vcpu)) {
3556                         vmx->soft_vnmi_blocked = 0;
3557                 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
3558                            vcpu->arch.nmi_pending) {
3559                         /*
3560                          * This CPU doesn't support us in finding the end of an
3561                          * NMI-blocked window if the guest runs with IRQs
3562                          * disabled. So we pull the trigger after 1 s of
3563                          * futile waiting, but inform the user about this.
3564                          */
3565                         printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
3566                                "state on VCPU %d after 1 s timeout\n",
3567                                __func__, vcpu->vcpu_id);
3568                         vmx->soft_vnmi_blocked = 0;
3569                 }
3570         }
3571
3572         if (exit_reason < kvm_vmx_max_exit_handlers
3573             && kvm_vmx_exit_handlers[exit_reason])
3574                 return kvm_vmx_exit_handlers[exit_reason](vcpu);
3575         else {
3576                 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
3577                 vcpu->run->hw.hardware_exit_reason = exit_reason;
3578         }
3579         return 0;
3580 }
3581
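/*
 * Program the TPR threshold.  If no interrupt is pending (irr == -1) or the
 * pending interrupt already has higher priority than the TPR, a threshold
 * of 0 means no TPR-below-threshold exits are needed; otherwise exit once
 * the guest drops its TPR below the pending vector's priority.
 */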
3582 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3583 {
3584         if (irr == -1 || tpr < irr) {
3585                 vmcs_write32(TPR_THRESHOLD, 0);
3586                 return;
3587         }
3588
3589         vmcs_write32(TPR_THRESHOLD, irr);
3590 }
3591
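/*
 * Runs right after VM exit, before interrupts are re-enabled: record the
 * exit reason, forward machine checks and NMIs that must be handled
 * immediately, recover the NMI-blocking state, and re-queue any event whose
 * delivery was interrupted by the exit so it is injected again on the next
 * entry.
 */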
3592 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
3593 {
3594         u32 exit_intr_info;
3595         u32 idt_vectoring_info = vmx->idt_vectoring_info;
3596         bool unblock_nmi;
3597         u8 vector;
3598         int type;
3599         bool idtv_info_valid;
3600
3601         exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
3602
3603         vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
3604
3605         /* Handle machine checks before interrupts are enabled */
3606         if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
3607             || (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI
3608                 && is_machine_check(exit_intr_info)))
3609                 kvm_machine_check();
3610
3611         /* We need to handle NMIs before interrupts are enabled */
3612         if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
3613             (exit_intr_info & INTR_INFO_VALID_MASK))
3614                 asm("int $2");
3615
3616         idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
3617
3618         if (cpu_has_virtual_nmis()) {
3619                 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
3620                 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
3621                 /*
3622                  * SDM 3: 27.7.1.2 (September 2008)
3623                  * Re-set bit "block by NMI" before VM entry if vmexit caused by
3624                  * a guest IRET fault.
3625                  * SDM 3: 23.2.2 (September 2008)
3626                  * Bit 12 is undefined in any of the following cases:
3627                  *  If the VM exit sets the valid bit in the IDT-vectoring
3628                  *   information field.
3629                  *  If the VM exit is due to a double fault.
3630                  */
3631                 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
3632                     vector != DF_VECTOR && !idtv_info_valid)
3633                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
3634                                       GUEST_INTR_STATE_NMI);
3635         } else if (unlikely(vmx->soft_vnmi_blocked))
3636                 vmx->vnmi_blocked_time +=
3637                         ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
3638
3639         vmx->vcpu.arch.nmi_injected = false;
3640         kvm_clear_exception_queue(&vmx->vcpu);
3641         kvm_clear_interrupt_queue(&vmx->vcpu);
3642
3643         if (!idtv_info_valid)
3644                 return;
3645
3646         vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
3647         type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
3648
3649         switch (type) {
3650         case INTR_TYPE_NMI_INTR:
3651                 vmx->vcpu.arch.nmi_injected = true;
3652                 /*
3653                  * SDM 3: 27.7.1.2 (September 2008)
3654                  * Clear bit "block by NMI" before VM entry if an NMI
3655                  * delivery faulted.
3656                  */
3657                 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
3658                                 GUEST_INTR_STATE_NMI);
3659                 break;
3660         case INTR_TYPE_SOFT_EXCEPTION:
3661                 vmx->vcpu.arch.event_exit_inst_len =
3662                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3663                 /* fall through */
3664         case INTR_TYPE_HARD_EXCEPTION:
3665                 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
3666                         u32 err = vmcs_read32(IDT_VECTORING_ERROR_CODE);
3667                         kvm_queue_exception_e(&vmx->vcpu, vector, err);
3668                 } else
3669                         kvm_queue_exception(&vmx->vcpu, vector);
3670                 break;
3671         case INTR_TYPE_SOFT_INTR:
3672                 vmx->vcpu.arch.event_exit_inst_len =
3673                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3674                 /* fall through */
3675         case INTR_TYPE_EXT_INTR:
3676                 kvm_queue_interrupt(&vmx->vcpu, vector,
3677                         type == INTR_TYPE_SOFT_INTR);
3678                 break;
3679         default:
3680                 break;
3681         }
3682 }
3683
3684 /*
3685  * Failure to inject an interrupt should give us the information
3686  * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
3687  * when fetching the interrupt redirection bitmap in the real-mode
3688  * tss, this doesn't happen.  So we do it ourselves.
3689  */
3690 static void fixup_rmode_irq(struct vcpu_vmx *vmx)
3691 {
3692         vmx->rmode.irq.pending = 0;
3693         if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
3694                 return;
3695         kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
3696         if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
3697                 vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
3698                 vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
3699                 return;
3700         }
3701         vmx->idt_vectoring_info =
3702                 VECTORING_INFO_VALID_MASK
3703                 | INTR_TYPE_EXT_INTR
3704                 | vmx->rmode.irq.vector;
3705 }
3706
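/*
 * R and Q select the register prefix and operand-size suffix used in the
 * inline assembly below: 64-bit (r/q) on x86_64, 32-bit (e/l) otherwise.
 */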
3707 #ifdef CONFIG_X86_64
3708 #define R "r"
3709 #define Q "q"
3710 #else
3711 #define R "e"
3712 #define Q "l"
3713 #endif
3714
3715 static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
3716 {
3717         struct vcpu_vmx *vmx = to_vmx(vcpu);
3718
3719         /* Record the guest's net vcpu time for enforced NMI injections. */
3720         if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
3721                 vmx->entry_time = ktime_get();
3722
3723         /* Don't enter VMX if guest state is invalid; let the exit handler
3724            start emulation until we arrive back at a valid state */
3725         if (vmx->emulation_required && emulate_invalid_guest_state)
3726                 return;
3727
3728         if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
3729                 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
3730         if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
3731                 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
3732
3733         /* When single-stepping over STI and MOV SS, we must clear the
3734          * corresponding interruptibility bits in the guest state. Otherwise
3735          * vmentry fails, as it then expects bit 14 (BS) to be set in the
3736          * pending debug exceptions field, which is not correct for the guest
3737          * debugging case. */
3738         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3739                 vmx_set_interrupt_shadow(vcpu, 0);
3740
3741         /*
3742          * Loading guest fpu may have cleared host cr0.ts
3743          */
3744         vmcs_writel(HOST_CR0, read_cr0());
3745
3746         if (vcpu->arch.switch_db_regs)
3747                 set_debugreg(vcpu->arch.dr6, 6);
3748
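        /*
         * The world switch itself: save the host registers we clobber,
         * reload CR2 if the guest's value differs from the current one,
         * then VMLAUNCH on the first entry or VMRESUME afterwards
         * (vmx->launched).  On return, guest registers are saved back into
         * vcpu->arch.regs and a VM-entry failure is recorded in vmx->fail
         * via setbe.
         */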
3749         asm(
3750                 /* Store host registers */
3751                 "push %%"R"dx; push %%"R"bp;"
3752                 "push %%"R"cx \n\t"
3753                 "cmp %%"R"sp, %c[host_rsp](%0) \n\t"
3754                 "je 1f \n\t"
3755                 "mov %%"R"sp, %c[host_rsp](%0) \n\t"
3756                 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
3757                 "1: \n\t"
3758                 /* Reload cr2 if changed */
3759                 "mov %c[cr2](%0), %%"R"ax \n\t"
3760                 "mov %%cr2, %%"R"dx \n\t"
3761                 "cmp %%"R"ax, %%"R"dx \n\t"
3762                 "je 2f \n\t"
3763                 "mov %%"R"ax, %%cr2 \n\t"
3764                 "2: \n\t"
3765                 /* Check if vmlaunch or vmresume is needed */
3766                 "cmpl $0, %c[launched](%0) \n\t"
3767                 /* Load guest registers.  Don't clobber flags. */
3768                 "mov %c[rax](%0), %%"R"ax \n\t"
3769                 "mov %c[rbx](%0), %%"R"bx \n\t"
3770                 "mov %c[rdx](%0), %%"R"dx \n\t"
3771                 "mov %c[rsi](%0), %%"R"si \n\t"
3772                 "mov %c[rdi](%0), %%"R"di \n\t"
3773                 "mov %c[rbp](%0), %%"R"bp \n\t"
3774 #ifdef CONFIG_X86_64
3775                 "mov %c[r8](%0),  %%r8  \n\t"
3776                 "mov %c[r9](%0),  %%r9  \n\t"
3777                 "mov %c[r10](%0), %%r10 \n\t"
3778                 "mov %c[r11](%0), %%r11 \n\t"
3779                 "mov %c[r12](%0), %%r12 \n\t"
3780                 "mov %c[r13](%0), %%r13 \n\t"
3781                 "mov %c[r14](%0), %%r14 \n\t"
3782                 "mov %c[r15](%0), %%r15 \n\t"
3783 #endif
3784                 "mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */
3785
3786                 /* Enter guest mode */
3787                 "jne .Llaunched \n\t"
3788                 __ex(ASM_VMX_VMLAUNCH) "\n\t"
3789                 "jmp .Lkvm_vmx_return \n\t"
3790                 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
3791                 ".Lkvm_vmx_return: "
3792                 /* Save guest registers, load host registers, keep flags */
3793                 "xchg %0,     (%%"R"sp) \n\t"
3794                 "mov %%"R"ax, %c[rax](%0) \n\t"
3795                 "mov %%"R"bx, %c[rbx](%0) \n\t"
3796                 "push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t"
3797                 "mov %%"R"dx, %c[rdx](%0) \n\t"
3798                 "mov %%"R"si, %c[rsi](%0) \n\t"
3799                 "mov %%"R"di, %c[rdi](%0) \n\t"
3800                 "mov %%"R"bp, %c[rbp](%0) \n\t"
3801 #ifdef CONFIG_X86_64
3802                 "mov %%r8,  %c[r8](%0) \n\t"
3803                 "mov %%r9,  %c[r9](%0) \n\t"
3804                 "mov %%r10, %c[r10](%0) \n\t"
3805                 "mov %%r11, %c[r11](%0) \n\t"
3806                 "mov %%r12, %c[r12](%0) \n\t"
3807                 "mov %%r13, %c[r13](%0) \n\t"
3808                 "mov %%r14, %c[r14](%0) \n\t"
3809                 "mov %%r15, %c[r15](%0) \n\t"
3810 #endif
3811                 "mov %%cr2, %%"R"ax   \n\t"
3812                 "mov %%"R"ax, %c[cr2](%0) \n\t"
3813
3814                 "pop  %%"R"bp; pop  %%"R"bp; pop  %%"R"dx \n\t"
3815                 "setbe %c[fail](%0) \n\t"
3816               : : "c"(vmx), "d"((unsigned long)HOST_RSP),
3817                 [launched]"i"(offsetof(struct vcpu_vmx, launched)),
3818                 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
3819                 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
3820                 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
3821                 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
3822                 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
3823                 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
3824                 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
3825                 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
3826                 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
3827 #ifdef CONFIG_X86_64
3828                 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
3829                 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
3830                 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
3831                 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
3832                 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
3833                 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
3834                 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
3835                 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
3836 #endif
3837                 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
3838               : "cc", "memory"
3839                 , R"bx", R"di", R"si"
3840 #ifdef CONFIG_X86_64
3841                 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
3842 #endif
3843               );
3844
3845         vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
3846                                   | (1 << VCPU_EXREG_PDPTR));
3847         vcpu->arch.regs_dirty = 0;
3848
3849         if (vcpu->arch.switch_db_regs)
3850                 get_debugreg(vcpu->arch.dr6, 6);
3851
3852         vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
3853         if (vmx->rmode.irq.pending)
3854                 fixup_rmode_irq(vmx);
3855
3856         asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
3857         vmx->launched = 1;
3858
3859         vmx_complete_interrupts(vmx);
3860 }
3861
3862 #undef R
3863 #undef Q
3864
3865 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
3866 {
3867         struct vcpu_vmx *vmx = to_vmx(vcpu);
3868
3869         if (vmx->vmcs) {
3870                 vcpu_clear(vmx);
3871                 free_vmcs(vmx->vmcs);
3872                 vmx->vmcs = NULL;
3873         }
3874 }
3875
3876 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
3877 {
3878         struct vcpu_vmx *vmx = to_vmx(vcpu);
3879
3880         spin_lock(&vmx_vpid_lock);
3881         if (vmx->vpid != 0)
3882                 __clear_bit(vmx->vpid, vmx_vpid_bitmap);
3883         spin_unlock(&vmx_vpid_lock);
3884         vmx_free_vmcs(vcpu);
3885         kfree(vmx->guest_msrs);
3886         kvm_vcpu_uninit(vcpu);
3887         kmem_cache_free(kvm_vcpu_cache, vmx);
3888 }
3889
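/*
 * Allocate and initialize a new VMX vcpu: the vcpu_vmx structure, a VPID,
 * the guest MSR save area and the VMCS itself.  The vcpu is temporarily
 * loaded on this CPU so that vmx_vcpu_setup() can program the VMCS, and the
 * per-VM APIC-access page and EPT identity page table are set up on first
 * use.
 */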
3890 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
3891 {
3892         int err;
3893         struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
3894         int cpu;
3895
3896         if (!vmx)
3897                 return ERR_PTR(-ENOMEM);
3898
3899         allocate_vpid(vmx);
3900
3901         err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
3902         if (err)
3903                 goto free_vcpu;
3904
3905         vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
3906         if (!vmx->guest_msrs) {
3907                 err = -ENOMEM;
3908                 goto uninit_vcpu;
3909         }
3910
3911         vmx->vmcs = alloc_vmcs();
3912         if (!vmx->vmcs) {
3913                 err = -ENOMEM;
                     goto free_msrs;
             }
3914
3915         vmcs_clear(vmx->vmcs);
3916
3917         cpu = get_cpu();
3918         vmx_vcpu_load(&vmx->vcpu, cpu);
3919         err = vmx_vcpu_setup(vmx);
3920         vmx_vcpu_put(&vmx->vcpu);
3921         put_cpu();
3922         if (err)
3923                 goto free_vmcs;
3924         if (vm_need_virtualize_apic_accesses(kvm))
3925                 if (alloc_apic_access_page(kvm) != 0)
3926                         goto free_vmcs;
3927
3928         if (enable_ept) {
3929                 if (!kvm->arch.ept_identity_map_addr)
3930                         kvm->arch.ept_identity_map_addr =
3931                                 VMX_EPT_IDENTITY_PAGETABLE_ADDR;
3932                 if (alloc_identity_pagetable(kvm) != 0)
3933                         goto free_vmcs;
3934         }
3935
3936         return &vmx->vcpu;
3937
3938 free_vmcs:
3939         free_vmcs(vmx->vmcs);
3940 free_msrs:
3941         kfree(vmx->guest_msrs);
3942 uninit_vcpu:
3943         kvm_vcpu_uninit(&vmx->vcpu);
3944 free_vcpu:
3945         kmem_cache_free(kvm_vcpu_cache, vmx);
3946         return ERR_PTR(err);
3947 }
3948
3949 static void __init vmx_check_processor_compat(void *rtn)
3950 {
3951         struct vmcs_config vmcs_conf;
3952
3953         *(int *)rtn = 0;
3954         if (setup_vmcs_config(&vmcs_conf) < 0)
3955                 *(int *)rtn = -EIO;
3956         if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
3957                 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
3958                                 smp_processor_id());
3959                 *(int *)rtn = -EIO;
3960         }
3961 }
3962
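/*
 * EPT page-walk length: the default guest address width plus one gives a
 * four-level EPT page table.
 */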
3963 static int get_ept_level(void)
3964 {
3965         return VMX_EPT_DEFAULT_GAW + 1;
3966 }
3967
3968 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
3969 {
3970         u64 ret;
3971
3972         /* For VT-d and EPT combination
3973          * 1. MMIO: always map as UC
3974          * 2. EPT with VT-d:
3975          *   a. VT-d without snooping control feature: can't guarantee the
3976          *      result, try to trust guest.
3977          *   b. VT-d with snooping control feature: snooping control feature of
3978          *      VT-d engine can guarantee the cache correctness. Just set it
3979          *      to WB to keep consistent with host. So the same as item 3.
3980          * 3. EPT without VT-d: always map as WB and set IGMT=1 to keep
3981          *    consistent with host MTRR
3982          */
3983         if (is_mmio)
3984                 ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
3985         else if (vcpu->kvm->arch.iommu_domain &&
3986                 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
3987                 ret = kvm_get_guest_memory_type(vcpu, gfn) <<
3988                       VMX_EPT_MT_EPTE_SHIFT;
3989         else
3990                 ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
3991                         | VMX_EPT_IGMT_BIT;
3992
3993         return ret;
3994 }
3995
3996 static const struct trace_print_flags vmx_exit_reasons_str[] = {
3997         { EXIT_REASON_EXCEPTION_NMI,           "exception" },
3998         { EXIT_REASON_EXTERNAL_INTERRUPT,      "ext_irq" },
3999         { EXIT_REASON_TRIPLE_FAULT,            "triple_fault" },
4000         { EXIT_REASON_NMI_WINDOW,              "nmi_window" },
4001         { EXIT_REASON_IO_INSTRUCTION,          "io_instruction" },
4002         { EXIT_REASON_CR_ACCESS,               "cr_access" },
4003         { EXIT_REASON_DR_ACCESS,               "dr_access" },
4004         { EXIT_REASON_CPUID,                   "cpuid" },
4005         { EXIT_REASON_MSR_READ,                "rdmsr" },
4006         { EXIT_REASON_MSR_WRITE,               "wrmsr" },
4007         { EXIT_REASON_PENDING_INTERRUPT,       "interrupt_window" },
4008         { EXIT_REASON_HLT,                     "halt" },
4009         { EXIT_REASON_INVLPG,                  "invlpg" },
4010         { EXIT_REASON_VMCALL,                  "hypercall" },
4011         { EXIT_REASON_TPR_BELOW_THRESHOLD,     "tpr_below_thres" },
4012         { EXIT_REASON_APIC_ACCESS,             "apic_access" },
4013         { EXIT_REASON_WBINVD,                  "wbinvd" },
4014         { EXIT_REASON_TASK_SWITCH,             "task_switch" },
4015         { EXIT_REASON_EPT_VIOLATION,           "ept_violation" },
4016         { -1, NULL }
4017 };
4018
4019 static bool vmx_gb_page_enable(void)
4020 {
4021         return false;
4022 }
4023
4024 static inline u32 bit(int bitno)
4025 {
4026         return 1 << (bitno & 31);
4027 }
4028
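/*
 * Re-sync the VMCS with the guest's CPUID: RDTSCP is only left enabled in
 * the secondary execution controls if the guest actually advertises it,
 * otherwise the control is cleared so the instruction raises #UD.
 */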
4029 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
4030 {
4031         struct kvm_cpuid_entry2 *best;
4032         struct vcpu_vmx *vmx = to_vmx(vcpu);
4033         u32 exec_control;
4034
4035         vmx->rdtscp_enabled = false;
4036         if (vmx_rdtscp_supported()) {
4037                 exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
4038                 if (exec_control & SECONDARY_EXEC_RDTSCP) {
4039                         best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
4040                         if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
4041                                 vmx->rdtscp_enabled = true;
4042                         else {
4043                                 exec_control &= ~SECONDARY_EXEC_RDTSCP;
4044                                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
4045                                                 exec_control);
4046                         }
4047                 }
4048         }
4049 }
4050
4051 static struct kvm_x86_ops vmx_x86_ops = {
4052         .cpu_has_kvm_support = cpu_has_kvm_support,
4053         .disabled_by_bios = vmx_disabled_by_bios,
4054         .hardware_setup = hardware_setup,
4055         .hardware_unsetup = hardware_unsetup,
4056         .check_processor_compatibility = vmx_check_processor_compat,
4057         .hardware_enable = hardware_enable,
4058         .hardware_disable = hardware_disable,
4059         .cpu_has_accelerated_tpr = report_flexpriority,
4060
4061         .vcpu_create = vmx_create_vcpu,
4062         .vcpu_free = vmx_free_vcpu,
4063         .vcpu_reset = vmx_vcpu_reset,
4064
4065         .prepare_guest_switch = vmx_save_host_state,
4066         .vcpu_load = vmx_vcpu_load,
4067         .vcpu_put = vmx_vcpu_put,
4068
4069         .set_guest_debug = set_guest_debug,
4070         .get_msr = vmx_get_msr,
4071         .set_msr = vmx_set_msr,
4072         .get_segment_base = vmx_get_segment_base,
4073         .get_segment = vmx_get_segment,
4074         .set_segment = vmx_set_segment,
4075         .get_cpl = vmx_get_cpl,
4076         .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
4077         .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
4078         .set_cr0 = vmx_set_cr0,
4079         .set_cr3 = vmx_set_cr3,
4080         .set_cr4 = vmx_set_cr4,
4081         .set_efer = vmx_set_efer,
4082         .get_idt = vmx_get_idt,
4083         .set_idt = vmx_set_idt,
4084         .get_gdt = vmx_get_gdt,
4085         .set_gdt = vmx_set_gdt,
4086         .cache_reg = vmx_cache_reg,
4087         .get_rflags = vmx_get_rflags,
4088         .set_rflags = vmx_set_rflags,
4089
4090         .tlb_flush = vmx_flush_tlb,
4091
4092         .run = vmx_vcpu_run,
4093         .handle_exit = vmx_handle_exit,
4094         .skip_emulated_instruction = skip_emulated_instruction,
4095         .set_interrupt_shadow = vmx_set_interrupt_shadow,
4096         .get_interrupt_shadow = vmx_get_interrupt_shadow,
4097         .patch_hypercall = vmx_patch_hypercall,
4098         .set_irq = vmx_inject_irq,
4099         .set_nmi = vmx_inject_nmi,
4100         .queue_exception = vmx_queue_exception,
4101         .interrupt_allowed = vmx_interrupt_allowed,
4102         .nmi_allowed = vmx_nmi_allowed,
4103         .get_nmi_mask = vmx_get_nmi_mask,
4104         .set_nmi_mask = vmx_set_nmi_mask,
4105         .enable_nmi_window = enable_nmi_window,
4106         .enable_irq_window = enable_irq_window,
4107         .update_cr8_intercept = update_cr8_intercept,
4108
4109         .set_tss_addr = vmx_set_tss_addr,
4110         .get_tdp_level = get_ept_level,
4111         .get_mt_mask = vmx_get_mt_mask,
4112
4113         .exit_reasons_str = vmx_exit_reasons_str,
4114         .gb_page_enable = vmx_gb_page_enable,
4115
4116         .cpuid_update = vmx_cpuid_update,
4117
4118         .rdtscp_supported = vmx_rdtscp_supported,
4119 };
4120
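/*
 * Module initialization: allocate the I/O and MSR permission bitmaps,
 * register with the common KVM code, open up direct access to a few
 * frequently used MSRs, and configure shadow paging vs. two-dimensional
 * (EPT) paging depending on hardware support.
 */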
4121 static int __init vmx_init(void)
4122 {
4123         int r, i;
4124
4125         rdmsrl_safe(MSR_EFER, &host_efer);
4126
4127         for (i = 0; i < NR_VMX_MSR; ++i)
4128                 kvm_define_shared_msr(i, vmx_msr_index[i]);
4129
4130         vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
4131         if (!vmx_io_bitmap_a)
4132                 return -ENOMEM;
4133
4134         vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
4135         if (!vmx_io_bitmap_b) {
4136                 r = -ENOMEM;
4137                 goto out;
4138         }
4139
4140         vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
4141         if (!vmx_msr_bitmap_legacy) {
4142                 r = -ENOMEM;
4143                 goto out1;
4144         }
4145
4146         vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
4147         if (!vmx_msr_bitmap_longmode) {
4148                 r = -ENOMEM;
4149                 goto out2;
4150         }
4151
4152         /*
4153          * Allow direct access to the PC debug port (it is often used for I/O
4154          * delays, but the vmexits simply slow things down).
4155          */
4156         memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
4157         clear_bit(0x80, vmx_io_bitmap_a);
4158
4159         memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
4160
4161         memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
4162         memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
4163
4164         set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
4165
4166         r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
4167         if (r)
4168                 goto out3;
4169
4170         vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
4171         vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
4172         vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
4173         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
4174         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
4175         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
4176
4177         if (enable_ept) {
4178                 bypass_guest_pf = 0;
4179                 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
4180                         VMX_EPT_WRITABLE_MASK);
4181                 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
4182                                 VMX_EPT_EXECUTABLE_MASK);
4183                 kvm_enable_tdp();
4184         } else
4185                 kvm_disable_tdp();
4186
4187         if (bypass_guest_pf)
4188                 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
4189
4190         return 0;
4191
4192 out3:
4193         free_page((unsigned long)vmx_msr_bitmap_longmode);
4194 out2:
4195         free_page((unsigned long)vmx_msr_bitmap_legacy);
4196 out1:
4197         free_page((unsigned long)vmx_io_bitmap_b);
4198 out:
4199         free_page((unsigned long)vmx_io_bitmap_a);
4200         return r;
4201 }
4202
4203 static void __exit vmx_exit(void)
4204 {
4205         free_page((unsigned long)vmx_msr_bitmap_legacy);
4206         free_page((unsigned long)vmx_msr_bitmap_longmode);
4207         free_page((unsigned long)vmx_io_bitmap_b);
4208         free_page((unsigned long)vmx_io_bitmap_a);
4209
4210         kvm_exit();
4211 }
4212
4213 module_init(vmx_init)
4214 module_exit(vmx_exit)