KVM: VMX: Do not fully reset PI descriptor on vCPU reset
[sfrench/cifs-2.6.git] / arch / x86 / kvm / vmx.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22 #include "lapic.h"
23
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include <linux/frame.h>
37 #include "kvm_cache_regs.h"
38 #include "x86.h"
39
40 #include <asm/cpu.h>
41 #include <asm/io.h>
42 #include <asm/desc.h>
43 #include <asm/vmx.h>
44 #include <asm/virtext.h>
45 #include <asm/mce.h>
46 #include <asm/fpu/internal.h>
47 #include <asm/perf_event.h>
48 #include <asm/debugreg.h>
49 #include <asm/kexec.h>
50 #include <asm/apic.h>
51 #include <asm/irq_remapping.h>
52 #include <asm/mmu_context.h>
53
54 #include "trace.h"
55 #include "pmu.h"
56
57 #define __ex(x) __kvm_handle_fault_on_reboot(x)
58 #define __ex_clear(x, reg) \
59         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
60
61 MODULE_AUTHOR("Qumranet");
62 MODULE_LICENSE("GPL");
63
64 static const struct x86_cpu_id vmx_cpu_id[] = {
65         X86_FEATURE_MATCH(X86_FEATURE_VMX),
66         {}
67 };
68 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
69
70 static bool __read_mostly enable_vpid = 1;
71 module_param_named(vpid, enable_vpid, bool, 0444);
72
73 static bool __read_mostly flexpriority_enabled = 1;
74 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
75
76 static bool __read_mostly enable_ept = 1;
77 module_param_named(ept, enable_ept, bool, S_IRUGO);
78
79 static bool __read_mostly enable_unrestricted_guest = 1;
80 module_param_named(unrestricted_guest,
81                         enable_unrestricted_guest, bool, S_IRUGO);
82
83 static bool __read_mostly enable_ept_ad_bits = 1;
84 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
85
86 static bool __read_mostly emulate_invalid_guest_state = true;
87 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
88
89 static bool __read_mostly fasteoi = 1;
90 module_param(fasteoi, bool, S_IRUGO);
91
92 static bool __read_mostly enable_apicv = 1;
93 module_param(enable_apicv, bool, S_IRUGO);
94
95 static bool __read_mostly enable_shadow_vmcs = 1;
96 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
97 /*
98  * If nested=1, nested virtualization is supported, i.e., guests may use
99  * VMX and be a hypervisor for its own guests. If nested=0, guests may not
100  * use VMX instructions.
101  */
102 static bool __read_mostly nested = 0;
103 module_param(nested, bool, S_IRUGO);
104
105 static u64 __read_mostly host_xss;
106
107 static bool __read_mostly enable_pml = 1;
108 module_param_named(pml, enable_pml, bool, S_IRUGO);
109
110 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
111
112 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
113 static int __read_mostly cpu_preemption_timer_multi;
114 static bool __read_mostly enable_preemption_timer = 1;
115 #ifdef CONFIG_X86_64
116 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
117 #endif
118
119 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
120 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
121 #define KVM_VM_CR0_ALWAYS_ON                                            \
122         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
123 #define KVM_CR4_GUEST_OWNED_BITS                                      \
124         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
125          | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
126
127 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
128 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
129
130 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
131
132 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
133
134 /*
135  * Hyper-V requires all of these, so mark them as supported even though
136  * they are just treated the same as all-context.
137  */
138 #define VMX_VPID_EXTENT_SUPPORTED_MASK          \
139         (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
140         VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
141         VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
142         VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
143
144 /*
145  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
146  * ple_gap:    upper bound on the amount of time between two successive
147  *             executions of PAUSE in a loop. Also indicate if ple enabled.
148  *             According to test, this time is usually smaller than 128 cycles.
149  * ple_window: upper bound on the amount of time a guest is allowed to execute
150  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
151  *             less than 2^12 cycles
152  * Time is measured based on a counter that runs at the same rate as the TSC,
153  * refer SDM volume 3b section 21.6.13 & 22.1.3.
154  */
155 #define KVM_VMX_DEFAULT_PLE_GAP           128
156 #define KVM_VMX_DEFAULT_PLE_WINDOW        4096
157 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW   2
158 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0
159 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX    \
160                 INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW
161
162 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
163 module_param(ple_gap, int, S_IRUGO);
164
165 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
166 module_param(ple_window, int, S_IRUGO);
167
168 /* Default doubles per-vcpu window every exit. */
169 static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
170 module_param(ple_window_grow, int, S_IRUGO);
171
172 /* Default resets per-vcpu window every exit to ple_window. */
173 static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
174 module_param(ple_window_shrink, int, S_IRUGO);
175
176 /* Default is to compute the maximum so we can never overflow. */
177 static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
178 static int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
179 module_param(ple_window_max, int, S_IRUGO);
180
181 extern const ulong vmx_return;
182
183 #define NR_AUTOLOAD_MSRS 8
184 #define VMCS02_POOL_SIZE 1
185
186 struct vmcs {
187         u32 revision_id;
188         u32 abort;
189         char data[0];
190 };
191
192 /*
193  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
194  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
195  * loaded on this CPU (so we can clear them if the CPU goes down).
196  */
197 struct loaded_vmcs {
198         struct vmcs *vmcs;
199         struct vmcs *shadow_vmcs;
200         int cpu;
201         bool launched;
202         bool nmi_known_unmasked;
203         unsigned long vmcs_host_cr3;    /* May not match real cr3 */
204         unsigned long vmcs_host_cr4;    /* May not match real cr4 */
205         struct list_head loaded_vmcss_on_cpu_link;
206 };
207
208 struct shared_msr_entry {
209         unsigned index;
210         u64 data;
211         u64 mask;
212 };
213
214 /*
215  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
216  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
217  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
218  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
219  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
220  * More than one of these structures may exist, if L1 runs multiple L2 guests.
221  * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
222  * underlying hardware which will be used to run L2.
223  * This structure is packed to ensure that its layout is identical across
224  * machines (necessary for live migration).
225  * If there are changes in this struct, VMCS12_REVISION must be changed.
226  */
227 typedef u64 natural_width;
228 struct __packed vmcs12 {
229         /* According to the Intel spec, a VMCS region must start with the
230          * following two fields. Then follow implementation-specific data.
231          */
232         u32 revision_id;
233         u32 abort;
234
235         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
236         u32 padding[7]; /* room for future expansion */
237
238         u64 io_bitmap_a;
239         u64 io_bitmap_b;
240         u64 msr_bitmap;
241         u64 vm_exit_msr_store_addr;
242         u64 vm_exit_msr_load_addr;
243         u64 vm_entry_msr_load_addr;
244         u64 tsc_offset;
245         u64 virtual_apic_page_addr;
246         u64 apic_access_addr;
247         u64 posted_intr_desc_addr;
248         u64 vm_function_control;
249         u64 ept_pointer;
250         u64 eoi_exit_bitmap0;
251         u64 eoi_exit_bitmap1;
252         u64 eoi_exit_bitmap2;
253         u64 eoi_exit_bitmap3;
254         u64 eptp_list_address;
255         u64 xss_exit_bitmap;
256         u64 guest_physical_address;
257         u64 vmcs_link_pointer;
258         u64 pml_address;
259         u64 guest_ia32_debugctl;
260         u64 guest_ia32_pat;
261         u64 guest_ia32_efer;
262         u64 guest_ia32_perf_global_ctrl;
263         u64 guest_pdptr0;
264         u64 guest_pdptr1;
265         u64 guest_pdptr2;
266         u64 guest_pdptr3;
267         u64 guest_bndcfgs;
268         u64 host_ia32_pat;
269         u64 host_ia32_efer;
270         u64 host_ia32_perf_global_ctrl;
271         u64 padding64[8]; /* room for future expansion */
272         /*
273          * To allow migration of L1 (complete with its L2 guests) between
274          * machines of different natural widths (32 or 64 bit), we cannot have
275          * unsigned long fields with no explict size. We use u64 (aliased
276          * natural_width) instead. Luckily, x86 is little-endian.
277          */
278         natural_width cr0_guest_host_mask;
279         natural_width cr4_guest_host_mask;
280         natural_width cr0_read_shadow;
281         natural_width cr4_read_shadow;
282         natural_width cr3_target_value0;
283         natural_width cr3_target_value1;
284         natural_width cr3_target_value2;
285         natural_width cr3_target_value3;
286         natural_width exit_qualification;
287         natural_width guest_linear_address;
288         natural_width guest_cr0;
289         natural_width guest_cr3;
290         natural_width guest_cr4;
291         natural_width guest_es_base;
292         natural_width guest_cs_base;
293         natural_width guest_ss_base;
294         natural_width guest_ds_base;
295         natural_width guest_fs_base;
296         natural_width guest_gs_base;
297         natural_width guest_ldtr_base;
298         natural_width guest_tr_base;
299         natural_width guest_gdtr_base;
300         natural_width guest_idtr_base;
301         natural_width guest_dr7;
302         natural_width guest_rsp;
303         natural_width guest_rip;
304         natural_width guest_rflags;
305         natural_width guest_pending_dbg_exceptions;
306         natural_width guest_sysenter_esp;
307         natural_width guest_sysenter_eip;
308         natural_width host_cr0;
309         natural_width host_cr3;
310         natural_width host_cr4;
311         natural_width host_fs_base;
312         natural_width host_gs_base;
313         natural_width host_tr_base;
314         natural_width host_gdtr_base;
315         natural_width host_idtr_base;
316         natural_width host_ia32_sysenter_esp;
317         natural_width host_ia32_sysenter_eip;
318         natural_width host_rsp;
319         natural_width host_rip;
320         natural_width paddingl[8]; /* room for future expansion */
321         u32 pin_based_vm_exec_control;
322         u32 cpu_based_vm_exec_control;
323         u32 exception_bitmap;
324         u32 page_fault_error_code_mask;
325         u32 page_fault_error_code_match;
326         u32 cr3_target_count;
327         u32 vm_exit_controls;
328         u32 vm_exit_msr_store_count;
329         u32 vm_exit_msr_load_count;
330         u32 vm_entry_controls;
331         u32 vm_entry_msr_load_count;
332         u32 vm_entry_intr_info_field;
333         u32 vm_entry_exception_error_code;
334         u32 vm_entry_instruction_len;
335         u32 tpr_threshold;
336         u32 secondary_vm_exec_control;
337         u32 vm_instruction_error;
338         u32 vm_exit_reason;
339         u32 vm_exit_intr_info;
340         u32 vm_exit_intr_error_code;
341         u32 idt_vectoring_info_field;
342         u32 idt_vectoring_error_code;
343         u32 vm_exit_instruction_len;
344         u32 vmx_instruction_info;
345         u32 guest_es_limit;
346         u32 guest_cs_limit;
347         u32 guest_ss_limit;
348         u32 guest_ds_limit;
349         u32 guest_fs_limit;
350         u32 guest_gs_limit;
351         u32 guest_ldtr_limit;
352         u32 guest_tr_limit;
353         u32 guest_gdtr_limit;
354         u32 guest_idtr_limit;
355         u32 guest_es_ar_bytes;
356         u32 guest_cs_ar_bytes;
357         u32 guest_ss_ar_bytes;
358         u32 guest_ds_ar_bytes;
359         u32 guest_fs_ar_bytes;
360         u32 guest_gs_ar_bytes;
361         u32 guest_ldtr_ar_bytes;
362         u32 guest_tr_ar_bytes;
363         u32 guest_interruptibility_info;
364         u32 guest_activity_state;
365         u32 guest_sysenter_cs;
366         u32 host_ia32_sysenter_cs;
367         u32 vmx_preemption_timer_value;
368         u32 padding32[7]; /* room for future expansion */
369         u16 virtual_processor_id;
370         u16 posted_intr_nv;
371         u16 guest_es_selector;
372         u16 guest_cs_selector;
373         u16 guest_ss_selector;
374         u16 guest_ds_selector;
375         u16 guest_fs_selector;
376         u16 guest_gs_selector;
377         u16 guest_ldtr_selector;
378         u16 guest_tr_selector;
379         u16 guest_intr_status;
380         u16 guest_pml_index;
381         u16 host_es_selector;
382         u16 host_cs_selector;
383         u16 host_ss_selector;
384         u16 host_ds_selector;
385         u16 host_fs_selector;
386         u16 host_gs_selector;
387         u16 host_tr_selector;
388 };
389
390 /*
391  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
392  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
393  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
394  */
395 #define VMCS12_REVISION 0x11e57ed0
396
397 /*
398  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
399  * and any VMCS region. Although only sizeof(struct vmcs12) are used by the
400  * current implementation, 4K are reserved to avoid future complications.
401  */
402 #define VMCS12_SIZE 0x1000
403
404 /* Used to remember the last vmcs02 used for some recently used vmcs12s */
405 struct vmcs02_list {
406         struct list_head list;
407         gpa_t vmptr;
408         struct loaded_vmcs vmcs02;
409 };
410
411 /*
412  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
413  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
414  */
415 struct nested_vmx {
416         /* Has the level1 guest done vmxon? */
417         bool vmxon;
418         gpa_t vmxon_ptr;
419         bool pml_full;
420
421         /* The guest-physical address of the current VMCS L1 keeps for L2 */
422         gpa_t current_vmptr;
423         /*
424          * Cache of the guest's VMCS, existing outside of guest memory.
425          * Loaded from guest memory during VMPTRLD. Flushed to guest
426          * memory during VMCLEAR and VMPTRLD.
427          */
428         struct vmcs12 *cached_vmcs12;
429         /*
430          * Indicates if the shadow vmcs must be updated with the
431          * data hold by vmcs12
432          */
433         bool sync_shadow_vmcs;
434
435         /* vmcs02_list cache of VMCSs recently used to run L2 guests */
436         struct list_head vmcs02_pool;
437         int vmcs02_num;
438         bool change_vmcs01_virtual_x2apic_mode;
439         /* L2 must run next, and mustn't decide to exit to L1. */
440         bool nested_run_pending;
441         /*
442          * Guest pages referred to in vmcs02 with host-physical pointers, so
443          * we must keep them pinned while L2 runs.
444          */
445         struct page *apic_access_page;
446         struct page *virtual_apic_page;
447         struct page *pi_desc_page;
448         struct pi_desc *pi_desc;
449         bool pi_pending;
450         u16 posted_intr_nv;
451
452         unsigned long *msr_bitmap;
453
454         struct hrtimer preemption_timer;
455         bool preemption_timer_expired;
456
457         /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
458         u64 vmcs01_debugctl;
459
460         u16 vpid02;
461         u16 last_vpid;
462
463         /*
464          * We only store the "true" versions of the VMX capability MSRs. We
465          * generate the "non-true" versions by setting the must-be-1 bits
466          * according to the SDM.
467          */
468         u32 nested_vmx_procbased_ctls_low;
469         u32 nested_vmx_procbased_ctls_high;
470         u32 nested_vmx_secondary_ctls_low;
471         u32 nested_vmx_secondary_ctls_high;
472         u32 nested_vmx_pinbased_ctls_low;
473         u32 nested_vmx_pinbased_ctls_high;
474         u32 nested_vmx_exit_ctls_low;
475         u32 nested_vmx_exit_ctls_high;
476         u32 nested_vmx_entry_ctls_low;
477         u32 nested_vmx_entry_ctls_high;
478         u32 nested_vmx_misc_low;
479         u32 nested_vmx_misc_high;
480         u32 nested_vmx_ept_caps;
481         u32 nested_vmx_vpid_caps;
482         u64 nested_vmx_basic;
483         u64 nested_vmx_cr0_fixed0;
484         u64 nested_vmx_cr0_fixed1;
485         u64 nested_vmx_cr4_fixed0;
486         u64 nested_vmx_cr4_fixed1;
487         u64 nested_vmx_vmcs_enum;
488         u64 nested_vmx_vmfunc_controls;
489 };
490
491 #define POSTED_INTR_ON  0
492 #define POSTED_INTR_SN  1
493
494 /* Posted-Interrupt Descriptor */
495 struct pi_desc {
496         u32 pir[8];     /* Posted interrupt requested */
497         union {
498                 struct {
499                                 /* bit 256 - Outstanding Notification */
500                         u16     on      : 1,
501                                 /* bit 257 - Suppress Notification */
502                                 sn      : 1,
503                                 /* bit 271:258 - Reserved */
504                                 rsvd_1  : 14;
505                                 /* bit 279:272 - Notification Vector */
506                         u8      nv;
507                                 /* bit 287:280 - Reserved */
508                         u8      rsvd_2;
509                                 /* bit 319:288 - Notification Destination */
510                         u32     ndst;
511                 };
512                 u64 control;
513         };
514         u32 rsvd[6];
515 } __aligned(64);
516
517 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
518 {
519         return test_and_set_bit(POSTED_INTR_ON,
520                         (unsigned long *)&pi_desc->control);
521 }
522
523 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
524 {
525         return test_and_clear_bit(POSTED_INTR_ON,
526                         (unsigned long *)&pi_desc->control);
527 }
528
529 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
530 {
531         return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
532 }
533
534 static inline void pi_clear_sn(struct pi_desc *pi_desc)
535 {
536         return clear_bit(POSTED_INTR_SN,
537                         (unsigned long *)&pi_desc->control);
538 }
539
540 static inline void pi_set_sn(struct pi_desc *pi_desc)
541 {
542         return set_bit(POSTED_INTR_SN,
543                         (unsigned long *)&pi_desc->control);
544 }
545
546 static inline void pi_clear_on(struct pi_desc *pi_desc)
547 {
548         clear_bit(POSTED_INTR_ON,
549                   (unsigned long *)&pi_desc->control);
550 }
551
552 static inline int pi_test_on(struct pi_desc *pi_desc)
553 {
554         return test_bit(POSTED_INTR_ON,
555                         (unsigned long *)&pi_desc->control);
556 }
557
558 static inline int pi_test_sn(struct pi_desc *pi_desc)
559 {
560         return test_bit(POSTED_INTR_SN,
561                         (unsigned long *)&pi_desc->control);
562 }
563
564 struct vcpu_vmx {
565         struct kvm_vcpu       vcpu;
566         unsigned long         host_rsp;
567         u8                    fail;
568         u32                   exit_intr_info;
569         u32                   idt_vectoring_info;
570         ulong                 rflags;
571         struct shared_msr_entry *guest_msrs;
572         int                   nmsrs;
573         int                   save_nmsrs;
574         unsigned long         host_idt_base;
575 #ifdef CONFIG_X86_64
576         u64                   msr_host_kernel_gs_base;
577         u64                   msr_guest_kernel_gs_base;
578 #endif
579         u32 vm_entry_controls_shadow;
580         u32 vm_exit_controls_shadow;
581         u32 secondary_exec_control;
582
583         /*
584          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
585          * non-nested (L1) guest, it always points to vmcs01. For a nested
586          * guest (L2), it points to a different VMCS.
587          */
588         struct loaded_vmcs    vmcs01;
589         struct loaded_vmcs   *loaded_vmcs;
590         bool                  __launched; /* temporary, used in vmx_vcpu_run */
591         struct msr_autoload {
592                 unsigned nr;
593                 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
594                 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
595         } msr_autoload;
596         struct {
597                 int           loaded;
598                 u16           fs_sel, gs_sel, ldt_sel;
599 #ifdef CONFIG_X86_64
600                 u16           ds_sel, es_sel;
601 #endif
602                 int           gs_ldt_reload_needed;
603                 int           fs_reload_needed;
604                 u64           msr_host_bndcfgs;
605         } host_state;
606         struct {
607                 int vm86_active;
608                 ulong save_rflags;
609                 struct kvm_segment segs[8];
610         } rmode;
611         struct {
612                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
613                 struct kvm_save_segment {
614                         u16 selector;
615                         unsigned long base;
616                         u32 limit;
617                         u32 ar;
618                 } seg[8];
619         } segment_cache;
620         int vpid;
621         bool emulation_required;
622
623         u32 exit_reason;
624
625         /* Posted interrupt descriptor */
626         struct pi_desc pi_desc;
627
628         /* Support for a guest hypervisor (nested VMX) */
629         struct nested_vmx nested;
630
631         /* Dynamic PLE window. */
632         int ple_window;
633         bool ple_window_dirty;
634
635         /* Support for PML */
636 #define PML_ENTITY_NUM          512
637         struct page *pml_pg;
638
639         /* apic deadline value in host tsc */
640         u64 hv_deadline_tsc;
641
642         u64 current_tsc_ratio;
643
644         u32 host_pkru;
645
646         /*
647          * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
648          * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
649          * in msr_ia32_feature_control_valid_bits.
650          */
651         u64 msr_ia32_feature_control;
652         u64 msr_ia32_feature_control_valid_bits;
653 };
654
655 enum segment_cache_field {
656         SEG_FIELD_SEL = 0,
657         SEG_FIELD_BASE = 1,
658         SEG_FIELD_LIMIT = 2,
659         SEG_FIELD_AR = 3,
660
661         SEG_FIELD_NR = 4
662 };
663
664 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
665 {
666         return container_of(vcpu, struct vcpu_vmx, vcpu);
667 }
668
669 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
670 {
671         return &(to_vmx(vcpu)->pi_desc);
672 }
673
674 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
675 #define FIELD(number, name)     [number] = VMCS12_OFFSET(name)
676 #define FIELD64(number, name)   [number] = VMCS12_OFFSET(name), \
677                                 [number##_HIGH] = VMCS12_OFFSET(name)+4
678
679
680 static unsigned long shadow_read_only_fields[] = {
681         /*
682          * We do NOT shadow fields that are modified when L0
683          * traps and emulates any vmx instruction (e.g. VMPTRLD,
684          * VMXON...) executed by L1.
685          * For example, VM_INSTRUCTION_ERROR is read
686          * by L1 if a vmx instruction fails (part of the error path).
687          * Note the code assumes this logic. If for some reason
688          * we start shadowing these fields then we need to
689          * force a shadow sync when L0 emulates vmx instructions
690          * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
691          * by nested_vmx_failValid)
692          */
693         VM_EXIT_REASON,
694         VM_EXIT_INTR_INFO,
695         VM_EXIT_INSTRUCTION_LEN,
696         IDT_VECTORING_INFO_FIELD,
697         IDT_VECTORING_ERROR_CODE,
698         VM_EXIT_INTR_ERROR_CODE,
699         EXIT_QUALIFICATION,
700         GUEST_LINEAR_ADDRESS,
701         GUEST_PHYSICAL_ADDRESS
702 };
703 static int max_shadow_read_only_fields =
704         ARRAY_SIZE(shadow_read_only_fields);
705
706 static unsigned long shadow_read_write_fields[] = {
707         TPR_THRESHOLD,
708         GUEST_RIP,
709         GUEST_RSP,
710         GUEST_CR0,
711         GUEST_CR3,
712         GUEST_CR4,
713         GUEST_INTERRUPTIBILITY_INFO,
714         GUEST_RFLAGS,
715         GUEST_CS_SELECTOR,
716         GUEST_CS_AR_BYTES,
717         GUEST_CS_LIMIT,
718         GUEST_CS_BASE,
719         GUEST_ES_BASE,
720         GUEST_BNDCFGS,
721         CR0_GUEST_HOST_MASK,
722         CR0_READ_SHADOW,
723         CR4_READ_SHADOW,
724         TSC_OFFSET,
725         EXCEPTION_BITMAP,
726         CPU_BASED_VM_EXEC_CONTROL,
727         VM_ENTRY_EXCEPTION_ERROR_CODE,
728         VM_ENTRY_INTR_INFO_FIELD,
729         VM_ENTRY_INSTRUCTION_LEN,
730         VM_ENTRY_EXCEPTION_ERROR_CODE,
731         HOST_FS_BASE,
732         HOST_GS_BASE,
733         HOST_FS_SELECTOR,
734         HOST_GS_SELECTOR
735 };
736 static int max_shadow_read_write_fields =
737         ARRAY_SIZE(shadow_read_write_fields);
738
739 static const unsigned short vmcs_field_to_offset_table[] = {
740         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
741         FIELD(POSTED_INTR_NV, posted_intr_nv),
742         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
743         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
744         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
745         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
746         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
747         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
748         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
749         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
750         FIELD(GUEST_INTR_STATUS, guest_intr_status),
751         FIELD(GUEST_PML_INDEX, guest_pml_index),
752         FIELD(HOST_ES_SELECTOR, host_es_selector),
753         FIELD(HOST_CS_SELECTOR, host_cs_selector),
754         FIELD(HOST_SS_SELECTOR, host_ss_selector),
755         FIELD(HOST_DS_SELECTOR, host_ds_selector),
756         FIELD(HOST_FS_SELECTOR, host_fs_selector),
757         FIELD(HOST_GS_SELECTOR, host_gs_selector),
758         FIELD(HOST_TR_SELECTOR, host_tr_selector),
759         FIELD64(IO_BITMAP_A, io_bitmap_a),
760         FIELD64(IO_BITMAP_B, io_bitmap_b),
761         FIELD64(MSR_BITMAP, msr_bitmap),
762         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
763         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
764         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
765         FIELD64(TSC_OFFSET, tsc_offset),
766         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
767         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
768         FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
769         FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
770         FIELD64(EPT_POINTER, ept_pointer),
771         FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
772         FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
773         FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
774         FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
775         FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
776         FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
777         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
778         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
779         FIELD64(PML_ADDRESS, pml_address),
780         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
781         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
782         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
783         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
784         FIELD64(GUEST_PDPTR0, guest_pdptr0),
785         FIELD64(GUEST_PDPTR1, guest_pdptr1),
786         FIELD64(GUEST_PDPTR2, guest_pdptr2),
787         FIELD64(GUEST_PDPTR3, guest_pdptr3),
788         FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
789         FIELD64(HOST_IA32_PAT, host_ia32_pat),
790         FIELD64(HOST_IA32_EFER, host_ia32_efer),
791         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
792         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
793         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
794         FIELD(EXCEPTION_BITMAP, exception_bitmap),
795         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
796         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
797         FIELD(CR3_TARGET_COUNT, cr3_target_count),
798         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
799         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
800         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
801         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
802         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
803         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
804         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
805         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
806         FIELD(TPR_THRESHOLD, tpr_threshold),
807         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
808         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
809         FIELD(VM_EXIT_REASON, vm_exit_reason),
810         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
811         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
812         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
813         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
814         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
815         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
816         FIELD(GUEST_ES_LIMIT, guest_es_limit),
817         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
818         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
819         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
820         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
821         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
822         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
823         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
824         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
825         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
826         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
827         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
828         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
829         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
830         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
831         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
832         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
833         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
834         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
835         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
836         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
837         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
838         FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
839         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
840         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
841         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
842         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
843         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
844         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
845         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
846         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
847         FIELD(EXIT_QUALIFICATION, exit_qualification),
848         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
849         FIELD(GUEST_CR0, guest_cr0),
850         FIELD(GUEST_CR3, guest_cr3),
851         FIELD(GUEST_CR4, guest_cr4),
852         FIELD(GUEST_ES_BASE, guest_es_base),
853         FIELD(GUEST_CS_BASE, guest_cs_base),
854         FIELD(GUEST_SS_BASE, guest_ss_base),
855         FIELD(GUEST_DS_BASE, guest_ds_base),
856         FIELD(GUEST_FS_BASE, guest_fs_base),
857         FIELD(GUEST_GS_BASE, guest_gs_base),
858         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
859         FIELD(GUEST_TR_BASE, guest_tr_base),
860         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
861         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
862         FIELD(GUEST_DR7, guest_dr7),
863         FIELD(GUEST_RSP, guest_rsp),
864         FIELD(GUEST_RIP, guest_rip),
865         FIELD(GUEST_RFLAGS, guest_rflags),
866         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
867         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
868         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
869         FIELD(HOST_CR0, host_cr0),
870         FIELD(HOST_CR3, host_cr3),
871         FIELD(HOST_CR4, host_cr4),
872         FIELD(HOST_FS_BASE, host_fs_base),
873         FIELD(HOST_GS_BASE, host_gs_base),
874         FIELD(HOST_TR_BASE, host_tr_base),
875         FIELD(HOST_GDTR_BASE, host_gdtr_base),
876         FIELD(HOST_IDTR_BASE, host_idtr_base),
877         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
878         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
879         FIELD(HOST_RSP, host_rsp),
880         FIELD(HOST_RIP, host_rip),
881 };
882
883 static inline short vmcs_field_to_offset(unsigned long field)
884 {
885         BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
886
887         if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
888             vmcs_field_to_offset_table[field] == 0)
889                 return -ENOENT;
890
891         return vmcs_field_to_offset_table[field];
892 }
893
894 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
895 {
896         return to_vmx(vcpu)->nested.cached_vmcs12;
897 }
898
899 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
900 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
901 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
902 static bool vmx_xsaves_supported(void);
903 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
904 static void vmx_set_segment(struct kvm_vcpu *vcpu,
905                             struct kvm_segment *var, int seg);
906 static void vmx_get_segment(struct kvm_vcpu *vcpu,
907                             struct kvm_segment *var, int seg);
908 static bool guest_state_valid(struct kvm_vcpu *vcpu);
909 static u32 vmx_segment_access_rights(struct kvm_segment *var);
910 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
911 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
912 static int alloc_identity_pagetable(struct kvm *kvm);
913 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
914 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
915 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
916                                             u16 error_code);
917
918 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
919 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
920 /*
921  * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
922  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
923  */
924 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
925
926 /*
927  * We maintian a per-CPU linked-list of vCPU, so in wakeup_handler() we
928  * can find which vCPU should be waken up.
929  */
930 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
931 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
932
933 enum {
934         VMX_IO_BITMAP_A,
935         VMX_IO_BITMAP_B,
936         VMX_MSR_BITMAP_LEGACY,
937         VMX_MSR_BITMAP_LONGMODE,
938         VMX_MSR_BITMAP_LEGACY_X2APIC_APICV,
939         VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV,
940         VMX_MSR_BITMAP_LEGACY_X2APIC,
941         VMX_MSR_BITMAP_LONGMODE_X2APIC,
942         VMX_VMREAD_BITMAP,
943         VMX_VMWRITE_BITMAP,
944         VMX_BITMAP_NR
945 };
946
947 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
948
949 #define vmx_io_bitmap_a                      (vmx_bitmap[VMX_IO_BITMAP_A])
950 #define vmx_io_bitmap_b                      (vmx_bitmap[VMX_IO_BITMAP_B])
951 #define vmx_msr_bitmap_legacy                (vmx_bitmap[VMX_MSR_BITMAP_LEGACY])
952 #define vmx_msr_bitmap_longmode              (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE])
953 #define vmx_msr_bitmap_legacy_x2apic_apicv   (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC_APICV])
954 #define vmx_msr_bitmap_longmode_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV])
955 #define vmx_msr_bitmap_legacy_x2apic         (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC])
956 #define vmx_msr_bitmap_longmode_x2apic       (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC])
957 #define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
958 #define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
959
960 static bool cpu_has_load_ia32_efer;
961 static bool cpu_has_load_perf_global_ctrl;
962
963 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
964 static DEFINE_SPINLOCK(vmx_vpid_lock);
965
966 static struct vmcs_config {
967         int size;
968         int order;
969         u32 basic_cap;
970         u32 revision_id;
971         u32 pin_based_exec_ctrl;
972         u32 cpu_based_exec_ctrl;
973         u32 cpu_based_2nd_exec_ctrl;
974         u32 vmexit_ctrl;
975         u32 vmentry_ctrl;
976 } vmcs_config;
977
978 static struct vmx_capability {
979         u32 ept;
980         u32 vpid;
981 } vmx_capability;
982
983 #define VMX_SEGMENT_FIELD(seg)                                  \
984         [VCPU_SREG_##seg] = {                                   \
985                 .selector = GUEST_##seg##_SELECTOR,             \
986                 .base = GUEST_##seg##_BASE,                     \
987                 .limit = GUEST_##seg##_LIMIT,                   \
988                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
989         }
990
991 static const struct kvm_vmx_segment_field {
992         unsigned selector;
993         unsigned base;
994         unsigned limit;
995         unsigned ar_bytes;
996 } kvm_vmx_segment_fields[] = {
997         VMX_SEGMENT_FIELD(CS),
998         VMX_SEGMENT_FIELD(DS),
999         VMX_SEGMENT_FIELD(ES),
1000         VMX_SEGMENT_FIELD(FS),
1001         VMX_SEGMENT_FIELD(GS),
1002         VMX_SEGMENT_FIELD(SS),
1003         VMX_SEGMENT_FIELD(TR),
1004         VMX_SEGMENT_FIELD(LDTR),
1005 };
1006
1007 static u64 host_efer;
1008
1009 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1010
1011 /*
1012  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
1013  * away by decrementing the array size.
1014  */
1015 static const u32 vmx_msr_index[] = {
1016 #ifdef CONFIG_X86_64
1017         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
1018 #endif
1019         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
1020 };
1021
1022 static inline bool is_exception_n(u32 intr_info, u8 vector)
1023 {
1024         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1025                              INTR_INFO_VALID_MASK)) ==
1026                 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1027 }
1028
1029 static inline bool is_debug(u32 intr_info)
1030 {
1031         return is_exception_n(intr_info, DB_VECTOR);
1032 }
1033
1034 static inline bool is_breakpoint(u32 intr_info)
1035 {
1036         return is_exception_n(intr_info, BP_VECTOR);
1037 }
1038
1039 static inline bool is_page_fault(u32 intr_info)
1040 {
1041         return is_exception_n(intr_info, PF_VECTOR);
1042 }
1043
1044 static inline bool is_no_device(u32 intr_info)
1045 {
1046         return is_exception_n(intr_info, NM_VECTOR);
1047 }
1048
1049 static inline bool is_invalid_opcode(u32 intr_info)
1050 {
1051         return is_exception_n(intr_info, UD_VECTOR);
1052 }
1053
1054 static inline bool is_external_interrupt(u32 intr_info)
1055 {
1056         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1057                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1058 }
1059
1060 static inline bool is_machine_check(u32 intr_info)
1061 {
1062         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1063                              INTR_INFO_VALID_MASK)) ==
1064                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1065 }
1066
1067 static inline bool cpu_has_vmx_msr_bitmap(void)
1068 {
1069         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
1070 }
1071
1072 static inline bool cpu_has_vmx_tpr_shadow(void)
1073 {
1074         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
1075 }
1076
1077 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
1078 {
1079         return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
1080 }
1081
1082 static inline bool cpu_has_secondary_exec_ctrls(void)
1083 {
1084         return vmcs_config.cpu_based_exec_ctrl &
1085                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1086 }
1087
1088 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
1089 {
1090         return vmcs_config.cpu_based_2nd_exec_ctrl &
1091                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1092 }
1093
1094 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1095 {
1096         return vmcs_config.cpu_based_2nd_exec_ctrl &
1097                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1098 }
1099
1100 static inline bool cpu_has_vmx_apic_register_virt(void)
1101 {
1102         return vmcs_config.cpu_based_2nd_exec_ctrl &
1103                 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1104 }
1105
1106 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1107 {
1108         return vmcs_config.cpu_based_2nd_exec_ctrl &
1109                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1110 }
1111
1112 /*
1113  * Comment's format: document - errata name - stepping - processor name.
1114  * Refer from
1115  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1116  */
1117 static u32 vmx_preemption_cpu_tfms[] = {
1118 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
1119 0x000206E6,
1120 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
1121 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1122 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1123 0x00020652,
1124 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1125 0x00020655,
1126 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
1127 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
1128 /*
1129  * 320767.pdf - AAP86  - B1 -
1130  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1131  */
1132 0x000106E5,
1133 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1134 0x000106A0,
1135 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1136 0x000106A1,
1137 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1138 0x000106A4,
1139  /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1140  /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1141  /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1142 0x000106A5,
1143 };
1144
1145 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1146 {
1147         u32 eax = cpuid_eax(0x00000001), i;
1148
1149         /* Clear the reserved bits */
1150         eax &= ~(0x3U << 14 | 0xfU << 28);
1151         for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
1152                 if (eax == vmx_preemption_cpu_tfms[i])
1153                         return true;
1154
1155         return false;
1156 }
1157
1158 static inline bool cpu_has_vmx_preemption_timer(void)
1159 {
1160         return vmcs_config.pin_based_exec_ctrl &
1161                 PIN_BASED_VMX_PREEMPTION_TIMER;
1162 }
1163
1164 static inline bool cpu_has_vmx_posted_intr(void)
1165 {
1166         return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1167                 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
1168 }
1169
1170 static inline bool cpu_has_vmx_apicv(void)
1171 {
1172         return cpu_has_vmx_apic_register_virt() &&
1173                 cpu_has_vmx_virtual_intr_delivery() &&
1174                 cpu_has_vmx_posted_intr();
1175 }
1176
1177 static inline bool cpu_has_vmx_flexpriority(void)
1178 {
1179         return cpu_has_vmx_tpr_shadow() &&
1180                 cpu_has_vmx_virtualize_apic_accesses();
1181 }
1182
1183 static inline bool cpu_has_vmx_ept_execute_only(void)
1184 {
1185         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
1186 }
1187
1188 static inline bool cpu_has_vmx_ept_2m_page(void)
1189 {
1190         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
1191 }
1192
1193 static inline bool cpu_has_vmx_ept_1g_page(void)
1194 {
1195         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
1196 }
1197
1198 static inline bool cpu_has_vmx_ept_4levels(void)
1199 {
1200         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1201 }
1202
1203 static inline bool cpu_has_vmx_ept_mt_wb(void)
1204 {
1205         return vmx_capability.ept & VMX_EPTP_WB_BIT;
1206 }
1207
1208 static inline bool cpu_has_vmx_ept_5levels(void)
1209 {
1210         return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
1211 }
1212
1213 static inline bool cpu_has_vmx_ept_ad_bits(void)
1214 {
1215         return vmx_capability.ept & VMX_EPT_AD_BIT;
1216 }
1217
1218 static inline bool cpu_has_vmx_invept_context(void)
1219 {
1220         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
1221 }
1222
1223 static inline bool cpu_has_vmx_invept_global(void)
1224 {
1225         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
1226 }
1227
1228 static inline bool cpu_has_vmx_invvpid_single(void)
1229 {
1230         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1231 }
1232
1233 static inline bool cpu_has_vmx_invvpid_global(void)
1234 {
1235         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1236 }
1237
1238 static inline bool cpu_has_vmx_invvpid(void)
1239 {
1240         return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1241 }
1242
1243 static inline bool cpu_has_vmx_ept(void)
1244 {
1245         return vmcs_config.cpu_based_2nd_exec_ctrl &
1246                 SECONDARY_EXEC_ENABLE_EPT;
1247 }
1248
1249 static inline bool cpu_has_vmx_unrestricted_guest(void)
1250 {
1251         return vmcs_config.cpu_based_2nd_exec_ctrl &
1252                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1253 }
1254
1255 static inline bool cpu_has_vmx_ple(void)
1256 {
1257         return vmcs_config.cpu_based_2nd_exec_ctrl &
1258                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1259 }
1260
1261 static inline bool cpu_has_vmx_basic_inout(void)
1262 {
1263         return  (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
1264 }
1265
1266 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
1267 {
1268         return flexpriority_enabled && lapic_in_kernel(vcpu);
1269 }
1270
1271 static inline bool cpu_has_vmx_vpid(void)
1272 {
1273         return vmcs_config.cpu_based_2nd_exec_ctrl &
1274                 SECONDARY_EXEC_ENABLE_VPID;
1275 }
1276
1277 static inline bool cpu_has_vmx_rdtscp(void)
1278 {
1279         return vmcs_config.cpu_based_2nd_exec_ctrl &
1280                 SECONDARY_EXEC_RDTSCP;
1281 }
1282
1283 static inline bool cpu_has_vmx_invpcid(void)
1284 {
1285         return vmcs_config.cpu_based_2nd_exec_ctrl &
1286                 SECONDARY_EXEC_ENABLE_INVPCID;
1287 }
1288
1289 static inline bool cpu_has_vmx_wbinvd_exit(void)
1290 {
1291         return vmcs_config.cpu_based_2nd_exec_ctrl &
1292                 SECONDARY_EXEC_WBINVD_EXITING;
1293 }
1294
1295 static inline bool cpu_has_vmx_shadow_vmcs(void)
1296 {
1297         u64 vmx_msr;
1298         rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1299         /* check if the cpu supports writing r/o exit information fields */
1300         if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1301                 return false;
1302
1303         return vmcs_config.cpu_based_2nd_exec_ctrl &
1304                 SECONDARY_EXEC_SHADOW_VMCS;
1305 }
1306
1307 static inline bool cpu_has_vmx_pml(void)
1308 {
1309         return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1310 }
1311
1312 static inline bool cpu_has_vmx_tsc_scaling(void)
1313 {
1314         return vmcs_config.cpu_based_2nd_exec_ctrl &
1315                 SECONDARY_EXEC_TSC_SCALING;
1316 }
1317
1318 static inline bool cpu_has_vmx_vmfunc(void)
1319 {
1320         return vmcs_config.cpu_based_2nd_exec_ctrl &
1321                 SECONDARY_EXEC_ENABLE_VMFUNC;
1322 }
1323
1324 static inline bool report_flexpriority(void)
1325 {
1326         return flexpriority_enabled;
1327 }
1328
1329 static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
1330 {
1331         return vmx_misc_cr3_count(to_vmx(vcpu)->nested.nested_vmx_misc_low);
1332 }
1333
1334 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1335 {
1336         return vmcs12->cpu_based_vm_exec_control & bit;
1337 }
1338
1339 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1340 {
1341         return (vmcs12->cpu_based_vm_exec_control &
1342                         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1343                 (vmcs12->secondary_vm_exec_control & bit);
1344 }
1345
1346 static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1347 {
1348         return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1349 }
1350
1351 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1352 {
1353         return vmcs12->pin_based_vm_exec_control &
1354                 PIN_BASED_VMX_PREEMPTION_TIMER;
1355 }
1356
1357 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1358 {
1359         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1360 }
1361
1362 static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1363 {
1364         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
1365 }
1366
1367 static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
1368 {
1369         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
1370 }
1371
1372 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
1373 {
1374         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
1375 }
1376
1377 static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
1378 {
1379         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
1380 }
1381
1382 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
1383 {
1384         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
1385 }
1386
1387 static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
1388 {
1389         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
1390 }
1391
1392 static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
1393 {
1394         return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
1395 }
1396
1397 static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
1398 {
1399         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
1400 }
1401
1402 static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
1403 {
1404         return nested_cpu_has_vmfunc(vmcs12) &&
1405                 (vmcs12->vm_function_control &
1406                  VMX_VMFUNC_EPTP_SWITCHING);
1407 }
1408
1409 static inline bool is_nmi(u32 intr_info)
1410 {
1411         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1412                 == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
1413 }
1414
1415 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1416                               u32 exit_intr_info,
1417                               unsigned long exit_qualification);
1418 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1419                         struct vmcs12 *vmcs12,
1420                         u32 reason, unsigned long qualification);
1421
1422 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
1423 {
1424         int i;
1425
1426         for (i = 0; i < vmx->nmsrs; ++i)
1427                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
1428                         return i;
1429         return -1;
1430 }
1431
1432 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1433 {
1434     struct {
1435         u64 vpid : 16;
1436         u64 rsvd : 48;
1437         u64 gva;
1438     } operand = { vpid, 0, gva };
1439
1440     asm volatile (__ex(ASM_VMX_INVVPID)
1441                   /* CF==1 or ZF==1 --> rc = -1 */
1442                   "; ja 1f ; ud2 ; 1:"
1443                   : : "a"(&operand), "c"(ext) : "cc", "memory");
1444 }
1445
1446 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1447 {
1448         struct {
1449                 u64 eptp, gpa;
1450         } operand = {eptp, gpa};
1451
1452         asm volatile (__ex(ASM_VMX_INVEPT)
1453                         /* CF==1 or ZF==1 --> rc = -1 */
1454                         "; ja 1f ; ud2 ; 1:\n"
1455                         : : "a" (&operand), "c" (ext) : "cc", "memory");
1456 }
1457
1458 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
1459 {
1460         int i;
1461
1462         i = __find_msr_index(vmx, msr);
1463         if (i >= 0)
1464                 return &vmx->guest_msrs[i];
1465         return NULL;
1466 }
1467
1468 static void vmcs_clear(struct vmcs *vmcs)
1469 {
1470         u64 phys_addr = __pa(vmcs);
1471         u8 error;
1472
1473         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
1474                       : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1475                       : "cc", "memory");
1476         if (error)
1477                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
1478                        vmcs, phys_addr);
1479 }
1480
1481 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1482 {
1483         vmcs_clear(loaded_vmcs->vmcs);
1484         if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
1485                 vmcs_clear(loaded_vmcs->shadow_vmcs);
1486         loaded_vmcs->cpu = -1;
1487         loaded_vmcs->launched = 0;
1488 }
1489
1490 static void vmcs_load(struct vmcs *vmcs)
1491 {
1492         u64 phys_addr = __pa(vmcs);
1493         u8 error;
1494
1495         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
1496                         : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1497                         : "cc", "memory");
1498         if (error)
1499                 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
1500                        vmcs, phys_addr);
1501 }
1502
1503 #ifdef CONFIG_KEXEC_CORE
1504 /*
1505  * This bitmap is used to indicate whether the vmclear
1506  * operation is enabled on all cpus. All disabled by
1507  * default.
1508  */
1509 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
1510
1511 static inline void crash_enable_local_vmclear(int cpu)
1512 {
1513         cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
1514 }
1515
1516 static inline void crash_disable_local_vmclear(int cpu)
1517 {
1518         cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
1519 }
1520
1521 static inline int crash_local_vmclear_enabled(int cpu)
1522 {
1523         return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
1524 }
1525
1526 static void crash_vmclear_local_loaded_vmcss(void)
1527 {
1528         int cpu = raw_smp_processor_id();
1529         struct loaded_vmcs *v;
1530
1531         if (!crash_local_vmclear_enabled(cpu))
1532                 return;
1533
1534         list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
1535                             loaded_vmcss_on_cpu_link)
1536                 vmcs_clear(v->vmcs);
1537 }
1538 #else
1539 static inline void crash_enable_local_vmclear(int cpu) { }
1540 static inline void crash_disable_local_vmclear(int cpu) { }
1541 #endif /* CONFIG_KEXEC_CORE */
1542
1543 static void __loaded_vmcs_clear(void *arg)
1544 {
1545         struct loaded_vmcs *loaded_vmcs = arg;
1546         int cpu = raw_smp_processor_id();
1547
1548         if (loaded_vmcs->cpu != cpu)
1549                 return; /* vcpu migration can race with cpu offline */
1550         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
1551                 per_cpu(current_vmcs, cpu) = NULL;
1552         crash_disable_local_vmclear(cpu);
1553         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
1554
1555         /*
1556          * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link
1557          * is before setting loaded_vmcs->vcpu to -1 which is done in
1558          * loaded_vmcs_init. Otherwise, other cpu can see vcpu = -1 fist
1559          * then adds the vmcs into percpu list before it is deleted.
1560          */
1561         smp_wmb();
1562
1563         loaded_vmcs_init(loaded_vmcs);
1564         crash_enable_local_vmclear(cpu);
1565 }
1566
1567 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1568 {
1569         int cpu = loaded_vmcs->cpu;
1570
1571         if (cpu != -1)
1572                 smp_call_function_single(cpu,
1573                          __loaded_vmcs_clear, loaded_vmcs, 1);
1574 }
1575
1576 static inline void vpid_sync_vcpu_single(int vpid)
1577 {
1578         if (vpid == 0)
1579                 return;
1580
1581         if (cpu_has_vmx_invvpid_single())
1582                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
1583 }
1584
1585 static inline void vpid_sync_vcpu_global(void)
1586 {
1587         if (cpu_has_vmx_invvpid_global())
1588                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
1589 }
1590
1591 static inline void vpid_sync_context(int vpid)
1592 {
1593         if (cpu_has_vmx_invvpid_single())
1594                 vpid_sync_vcpu_single(vpid);
1595         else
1596                 vpid_sync_vcpu_global();
1597 }
1598
1599 static inline void ept_sync_global(void)
1600 {
1601         if (cpu_has_vmx_invept_global())
1602                 __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
1603 }
1604
1605 static inline void ept_sync_context(u64 eptp)
1606 {
1607         if (enable_ept) {
1608                 if (cpu_has_vmx_invept_context())
1609                         __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
1610                 else
1611                         ept_sync_global();
1612         }
1613 }
1614
1615 static __always_inline void vmcs_check16(unsigned long field)
1616 {
1617         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1618                          "16-bit accessor invalid for 64-bit field");
1619         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1620                          "16-bit accessor invalid for 64-bit high field");
1621         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1622                          "16-bit accessor invalid for 32-bit high field");
1623         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1624                          "16-bit accessor invalid for natural width field");
1625 }
1626
1627 static __always_inline void vmcs_check32(unsigned long field)
1628 {
1629         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1630                          "32-bit accessor invalid for 16-bit field");
1631         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1632                          "32-bit accessor invalid for natural width field");
1633 }
1634
1635 static __always_inline void vmcs_check64(unsigned long field)
1636 {
1637         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1638                          "64-bit accessor invalid for 16-bit field");
1639         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1640                          "64-bit accessor invalid for 64-bit high field");
1641         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1642                          "64-bit accessor invalid for 32-bit field");
1643         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1644                          "64-bit accessor invalid for natural width field");
1645 }
1646
1647 static __always_inline void vmcs_checkl(unsigned long field)
1648 {
1649         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1650                          "Natural width accessor invalid for 16-bit field");
1651         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1652                          "Natural width accessor invalid for 64-bit field");
1653         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1654                          "Natural width accessor invalid for 64-bit high field");
1655         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1656                          "Natural width accessor invalid for 32-bit field");
1657 }
1658
1659 static __always_inline unsigned long __vmcs_readl(unsigned long field)
1660 {
1661         unsigned long value;
1662
1663         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
1664                       : "=a"(value) : "d"(field) : "cc");
1665         return value;
1666 }
1667
1668 static __always_inline u16 vmcs_read16(unsigned long field)
1669 {
1670         vmcs_check16(field);
1671         return __vmcs_readl(field);
1672 }
1673
1674 static __always_inline u32 vmcs_read32(unsigned long field)
1675 {
1676         vmcs_check32(field);
1677         return __vmcs_readl(field);
1678 }
1679
1680 static __always_inline u64 vmcs_read64(unsigned long field)
1681 {
1682         vmcs_check64(field);
1683 #ifdef CONFIG_X86_64
1684         return __vmcs_readl(field);
1685 #else
1686         return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
1687 #endif
1688 }
1689
1690 static __always_inline unsigned long vmcs_readl(unsigned long field)
1691 {
1692         vmcs_checkl(field);
1693         return __vmcs_readl(field);
1694 }
1695
1696 static noinline void vmwrite_error(unsigned long field, unsigned long value)
1697 {
1698         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
1699                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
1700         dump_stack();
1701 }
1702
1703 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
1704 {
1705         u8 error;
1706
1707         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
1708                        : "=q"(error) : "a"(value), "d"(field) : "cc");
1709         if (unlikely(error))
1710                 vmwrite_error(field, value);
1711 }
1712
1713 static __always_inline void vmcs_write16(unsigned long field, u16 value)
1714 {
1715         vmcs_check16(field);
1716         __vmcs_writel(field, value);
1717 }
1718
1719 static __always_inline void vmcs_write32(unsigned long field, u32 value)
1720 {
1721         vmcs_check32(field);
1722         __vmcs_writel(field, value);
1723 }
1724
1725 static __always_inline void vmcs_write64(unsigned long field, u64 value)
1726 {
1727         vmcs_check64(field);
1728         __vmcs_writel(field, value);
1729 #ifndef CONFIG_X86_64
1730         asm volatile ("");
1731         __vmcs_writel(field+1, value >> 32);
1732 #endif
1733 }
1734
1735 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
1736 {
1737         vmcs_checkl(field);
1738         __vmcs_writel(field, value);
1739 }
1740
1741 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
1742 {
1743         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1744                          "vmcs_clear_bits does not support 64-bit fields");
1745         __vmcs_writel(field, __vmcs_readl(field) & ~mask);
1746 }
1747
1748 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
1749 {
1750         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1751                          "vmcs_set_bits does not support 64-bit fields");
1752         __vmcs_writel(field, __vmcs_readl(field) | mask);
1753 }
1754
1755 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
1756 {
1757         vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
1758 }
1759
1760 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
1761 {
1762         vmcs_write32(VM_ENTRY_CONTROLS, val);
1763         vmx->vm_entry_controls_shadow = val;
1764 }
1765
1766 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
1767 {
1768         if (vmx->vm_entry_controls_shadow != val)
1769                 vm_entry_controls_init(vmx, val);
1770 }
1771
1772 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
1773 {
1774         return vmx->vm_entry_controls_shadow;
1775 }
1776
1777
1778 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1779 {
1780         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
1781 }
1782
1783 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1784 {
1785         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
1786 }
1787
1788 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
1789 {
1790         vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
1791 }
1792
1793 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
1794 {
1795         vmcs_write32(VM_EXIT_CONTROLS, val);
1796         vmx->vm_exit_controls_shadow = val;
1797 }
1798
1799 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
1800 {
1801         if (vmx->vm_exit_controls_shadow != val)
1802                 vm_exit_controls_init(vmx, val);
1803 }
1804
1805 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
1806 {
1807         return vmx->vm_exit_controls_shadow;
1808 }
1809
1810
1811 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1812 {
1813         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
1814 }
1815
1816 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1817 {
1818         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
1819 }
1820
1821 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
1822 {
1823         vmx->segment_cache.bitmask = 0;
1824 }
1825
1826 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
1827                                        unsigned field)
1828 {
1829         bool ret;
1830         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
1831
1832         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
1833                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
1834                 vmx->segment_cache.bitmask = 0;
1835         }
1836         ret = vmx->segment_cache.bitmask & mask;
1837         vmx->segment_cache.bitmask |= mask;
1838         return ret;
1839 }
1840
1841 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
1842 {
1843         u16 *p = &vmx->segment_cache.seg[seg].selector;
1844
1845         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
1846                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
1847         return *p;
1848 }
1849
1850 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
1851 {
1852         ulong *p = &vmx->segment_cache.seg[seg].base;
1853
1854         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
1855                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
1856         return *p;
1857 }
1858
1859 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
1860 {
1861         u32 *p = &vmx->segment_cache.seg[seg].limit;
1862
1863         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
1864                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
1865         return *p;
1866 }
1867
1868 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
1869 {
1870         u32 *p = &vmx->segment_cache.seg[seg].ar;
1871
1872         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
1873                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
1874         return *p;
1875 }
1876
1877 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1878 {
1879         u32 eb;
1880
1881         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1882              (1u << DB_VECTOR) | (1u << AC_VECTOR);
1883         if ((vcpu->guest_debug &
1884              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
1885             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
1886                 eb |= 1u << BP_VECTOR;
1887         if (to_vmx(vcpu)->rmode.vm86_active)
1888                 eb = ~0;
1889         if (enable_ept)
1890                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
1891
1892         /* When we are running a nested L2 guest and L1 specified for it a
1893          * certain exception bitmap, we must trap the same exceptions and pass
1894          * them to L1. When running L2, we will only handle the exceptions
1895          * specified above if L1 did not want them.
1896          */
1897         if (is_guest_mode(vcpu))
1898                 eb |= get_vmcs12(vcpu)->exception_bitmap;
1899
1900         vmcs_write32(EXCEPTION_BITMAP, eb);
1901 }
1902
1903 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1904                 unsigned long entry, unsigned long exit)
1905 {
1906         vm_entry_controls_clearbit(vmx, entry);
1907         vm_exit_controls_clearbit(vmx, exit);
1908 }
1909
1910 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1911 {
1912         unsigned i;
1913         struct msr_autoload *m = &vmx->msr_autoload;
1914
1915         switch (msr) {
1916         case MSR_EFER:
1917                 if (cpu_has_load_ia32_efer) {
1918                         clear_atomic_switch_msr_special(vmx,
1919                                         VM_ENTRY_LOAD_IA32_EFER,
1920                                         VM_EXIT_LOAD_IA32_EFER);
1921                         return;
1922                 }
1923                 break;
1924         case MSR_CORE_PERF_GLOBAL_CTRL:
1925                 if (cpu_has_load_perf_global_ctrl) {
1926                         clear_atomic_switch_msr_special(vmx,
1927                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1928                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1929                         return;
1930                 }
1931                 break;
1932         }
1933
1934         for (i = 0; i < m->nr; ++i)
1935                 if (m->guest[i].index == msr)
1936                         break;
1937
1938         if (i == m->nr)
1939                 return;
1940         --m->nr;
1941         m->guest[i] = m->guest[m->nr];
1942         m->host[i] = m->host[m->nr];
1943         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1944         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1945 }
1946
1947 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1948                 unsigned long entry, unsigned long exit,
1949                 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
1950                 u64 guest_val, u64 host_val)
1951 {
1952         vmcs_write64(guest_val_vmcs, guest_val);
1953         vmcs_write64(host_val_vmcs, host_val);
1954         vm_entry_controls_setbit(vmx, entry);
1955         vm_exit_controls_setbit(vmx, exit);
1956 }
1957
1958 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1959                                   u64 guest_val, u64 host_val)
1960 {
1961         unsigned i;
1962         struct msr_autoload *m = &vmx->msr_autoload;
1963
1964         switch (msr) {
1965         case MSR_EFER:
1966                 if (cpu_has_load_ia32_efer) {
1967                         add_atomic_switch_msr_special(vmx,
1968                                         VM_ENTRY_LOAD_IA32_EFER,
1969                                         VM_EXIT_LOAD_IA32_EFER,
1970                                         GUEST_IA32_EFER,
1971                                         HOST_IA32_EFER,
1972                                         guest_val, host_val);
1973                         return;
1974                 }
1975                 break;
1976         case MSR_CORE_PERF_GLOBAL_CTRL:
1977                 if (cpu_has_load_perf_global_ctrl) {
1978                         add_atomic_switch_msr_special(vmx,
1979                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1980                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1981                                         GUEST_IA32_PERF_GLOBAL_CTRL,
1982                                         HOST_IA32_PERF_GLOBAL_CTRL,
1983                                         guest_val, host_val);
1984                         return;
1985                 }
1986                 break;
1987         case MSR_IA32_PEBS_ENABLE:
1988                 /* PEBS needs a quiescent period after being disabled (to write
1989                  * a record).  Disabling PEBS through VMX MSR swapping doesn't
1990                  * provide that period, so a CPU could write host's record into
1991                  * guest's memory.
1992                  */
1993                 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
1994         }
1995
1996         for (i = 0; i < m->nr; ++i)
1997                 if (m->guest[i].index == msr)
1998                         break;
1999
2000         if (i == NR_AUTOLOAD_MSRS) {
2001                 printk_once(KERN_WARNING "Not enough msr switch entries. "
2002                                 "Can't add msr %x\n", msr);
2003                 return;
2004         } else if (i == m->nr) {
2005                 ++m->nr;
2006                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
2007                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
2008         }
2009
2010         m->guest[i].index = msr;
2011         m->guest[i].value = guest_val;
2012         m->host[i].index = msr;
2013         m->host[i].value = host_val;
2014 }
2015
2016 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
2017 {
2018         u64 guest_efer = vmx->vcpu.arch.efer;
2019         u64 ignore_bits = 0;
2020
2021         if (!enable_ept) {
2022                 /*
2023                  * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
2024                  * host CPUID is more efficient than testing guest CPUID
2025                  * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
2026                  */
2027                 if (boot_cpu_has(X86_FEATURE_SMEP))
2028                         guest_efer |= EFER_NX;
2029                 else if (!(guest_efer & EFER_NX))
2030                         ignore_bits |= EFER_NX;
2031         }
2032
2033         /*
2034          * LMA and LME handled by hardware; SCE meaningless outside long mode.
2035          */
2036         ignore_bits |= EFER_SCE;
2037 #ifdef CONFIG_X86_64
2038         ignore_bits |= EFER_LMA | EFER_LME;
2039         /* SCE is meaningful only in long mode on Intel */
2040         if (guest_efer & EFER_LMA)
2041                 ignore_bits &= ~(u64)EFER_SCE;
2042 #endif
2043
2044         clear_atomic_switch_msr(vmx, MSR_EFER);
2045
2046         /*
2047          * On EPT, we can't emulate NX, so we must switch EFER atomically.
2048          * On CPUs that support "load IA32_EFER", always switch EFER
2049          * atomically, since it's faster than switching it manually.
2050          */
2051         if (cpu_has_load_ia32_efer ||
2052             (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
2053                 if (!(guest_efer & EFER_LMA))
2054                         guest_efer &= ~EFER_LME;
2055                 if (guest_efer != host_efer)
2056                         add_atomic_switch_msr(vmx, MSR_EFER,
2057                                               guest_efer, host_efer);
2058                 return false;
2059         } else {
2060                 guest_efer &= ~ignore_bits;
2061                 guest_efer |= host_efer & ignore_bits;
2062
2063                 vmx->guest_msrs[efer_offset].data = guest_efer;
2064                 vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
2065
2066                 return true;
2067         }
2068 }
2069
2070 #ifdef CONFIG_X86_32
2071 /*
2072  * On 32-bit kernels, VM exits still load the FS and GS bases from the
2073  * VMCS rather than the segment table.  KVM uses this helper to figure
2074  * out the current bases to poke them into the VMCS before entry.
2075  */
2076 static unsigned long segment_base(u16 selector)
2077 {
2078         struct desc_struct *table;
2079         unsigned long v;
2080
2081         if (!(selector & ~SEGMENT_RPL_MASK))
2082                 return 0;
2083
2084         table = get_current_gdt_ro();
2085
2086         if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2087                 u16 ldt_selector = kvm_read_ldt();
2088
2089                 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
2090                         return 0;
2091
2092                 table = (struct desc_struct *)segment_base(ldt_selector);
2093         }
2094         v = get_desc_base(&table[selector >> 3]);
2095         return v;
2096 }
2097 #endif
2098
2099 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
2100 {
2101         struct vcpu_vmx *vmx = to_vmx(vcpu);
2102         int i;
2103
2104         if (vmx->host_state.loaded)
2105                 return;
2106
2107         vmx->host_state.loaded = 1;
2108         /*
2109          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
2110          * allow segment selectors with cpl > 0 or ti == 1.
2111          */
2112         vmx->host_state.ldt_sel = kvm_read_ldt();
2113         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
2114         savesegment(fs, vmx->host_state.fs_sel);
2115         if (!(vmx->host_state.fs_sel & 7)) {
2116                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
2117                 vmx->host_state.fs_reload_needed = 0;
2118         } else {
2119                 vmcs_write16(HOST_FS_SELECTOR, 0);
2120                 vmx->host_state.fs_reload_needed = 1;
2121         }
2122         savesegment(gs, vmx->host_state.gs_sel);
2123         if (!(vmx->host_state.gs_sel & 7))
2124                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
2125         else {
2126                 vmcs_write16(HOST_GS_SELECTOR, 0);
2127                 vmx->host_state.gs_ldt_reload_needed = 1;
2128         }
2129
2130 #ifdef CONFIG_X86_64
2131         savesegment(ds, vmx->host_state.ds_sel);
2132         savesegment(es, vmx->host_state.es_sel);
2133 #endif
2134
2135 #ifdef CONFIG_X86_64
2136         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
2137         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
2138 #else
2139         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
2140         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
2141 #endif
2142
2143 #ifdef CONFIG_X86_64
2144         rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2145         if (is_long_mode(&vmx->vcpu))
2146                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2147 #endif
2148         if (boot_cpu_has(X86_FEATURE_MPX))
2149                 rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2150         for (i = 0; i < vmx->save_nmsrs; ++i)
2151                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2152                                    vmx->guest_msrs[i].data,
2153                                    vmx->guest_msrs[i].mask);
2154 }
2155
2156 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
2157 {
2158         if (!vmx->host_state.loaded)
2159                 return;
2160
2161         ++vmx->vcpu.stat.host_state_reload;
2162         vmx->host_state.loaded = 0;
2163 #ifdef CONFIG_X86_64
2164         if (is_long_mode(&vmx->vcpu))
2165                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2166 #endif
2167         if (vmx->host_state.gs_ldt_reload_needed) {
2168                 kvm_load_ldt(vmx->host_state.ldt_sel);
2169 #ifdef CONFIG_X86_64
2170                 load_gs_index(vmx->host_state.gs_sel);
2171 #else
2172                 loadsegment(gs, vmx->host_state.gs_sel);
2173 #endif
2174         }
2175         if (vmx->host_state.fs_reload_needed)
2176                 loadsegment(fs, vmx->host_state.fs_sel);
2177 #ifdef CONFIG_X86_64
2178         if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
2179                 loadsegment(ds, vmx->host_state.ds_sel);
2180                 loadsegment(es, vmx->host_state.es_sel);
2181         }
2182 #endif
2183         invalidate_tss_limit();
2184 #ifdef CONFIG_X86_64
2185         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2186 #endif
2187         if (vmx->host_state.msr_host_bndcfgs)
2188                 wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2189         load_fixmap_gdt(raw_smp_processor_id());
2190 }
2191
2192 static void vmx_load_host_state(struct vcpu_vmx *vmx)
2193 {
2194         preempt_disable();
2195         __vmx_load_host_state(vmx);
2196         preempt_enable();
2197 }
2198
2199 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2200 {
2201         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2202         struct pi_desc old, new;
2203         unsigned int dest;
2204
2205         /*
2206          * In case of hot-plug or hot-unplug, we may have to undo
2207          * vmx_vcpu_pi_put even if there is no assigned device.  And we
2208          * always keep PI.NDST up to date for simplicity: it makes the
2209          * code easier, and CPU migration is not a fast path.
2210          */
2211         if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
2212                 return;
2213
2214         /*
2215          * First handle the simple case where no cmpxchg is necessary; just
2216          * allow posting non-urgent interrupts.
2217          *
2218          * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
2219          * PI.NDST: pi_post_block will do it for us and the wakeup_handler
2220          * expects the VCPU to be on the blocked_vcpu_list that matches
2221          * PI.NDST.
2222          */
2223         if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
2224             vcpu->cpu == cpu) {
2225                 pi_clear_sn(pi_desc);
2226                 return;
2227         }
2228
2229         /* The full case.  */
2230         do {
2231                 old.control = new.control = pi_desc->control;
2232
2233                 dest = cpu_physical_id(cpu);
2234
2235                 if (x2apic_enabled())
2236                         new.ndst = dest;
2237                 else
2238                         new.ndst = (dest << 8) & 0xFF00;
2239
2240                 new.sn = 0;
2241         } while (cmpxchg64(&pi_desc->control, old.control,
2242                            new.control) != old.control);
2243 }
2244
2245 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
2246 {
2247         vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
2248         vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
2249 }
2250
2251 /*
2252  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
2253  * vcpu mutex is already taken.
2254  */
2255 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2256 {
2257         struct vcpu_vmx *vmx = to_vmx(vcpu);
2258         bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
2259
2260         if (!already_loaded) {
2261                 loaded_vmcs_clear(vmx->loaded_vmcs);
2262                 local_irq_disable();
2263                 crash_disable_local_vmclear(cpu);
2264
2265                 /*
2266                  * Read loaded_vmcs->cpu should be before fetching
2267                  * loaded_vmcs->loaded_vmcss_on_cpu_link.
2268                  * See the comments in __loaded_vmcs_clear().
2269                  */
2270                 smp_rmb();
2271
2272                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
2273                          &per_cpu(loaded_vmcss_on_cpu, cpu));
2274                 crash_enable_local_vmclear(cpu);
2275                 local_irq_enable();
2276         }
2277
2278         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
2279                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
2280                 vmcs_load(vmx->loaded_vmcs->vmcs);
2281         }
2282
2283         if (!already_loaded) {
2284                 void *gdt = get_current_gdt_ro();
2285                 unsigned long sysenter_esp;
2286
2287                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2288
2289                 /*
2290                  * Linux uses per-cpu TSS and GDT, so set these when switching
2291                  * processors.  See 22.2.4.
2292                  */
2293                 vmcs_writel(HOST_TR_BASE,
2294                             (unsigned long)this_cpu_ptr(&cpu_tss));
2295                 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
2296
2297                 /*
2298                  * VM exits change the host TR limit to 0x67 after a VM
2299                  * exit.  This is okay, since 0x67 covers everything except
2300                  * the IO bitmap and have have code to handle the IO bitmap
2301                  * being lost after a VM exit.
2302                  */
2303                 BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
2304
2305                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
2306                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
2307
2308                 vmx->loaded_vmcs->cpu = cpu;
2309         }
2310
2311         /* Setup TSC multiplier */
2312         if (kvm_has_tsc_control &&
2313             vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
2314                 decache_tsc_multiplier(vmx);
2315
2316         vmx_vcpu_pi_load(vcpu, cpu);
2317         vmx->host_pkru = read_pkru();
2318 }
2319
2320 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
2321 {
2322         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2323
2324         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
2325                 !irq_remapping_cap(IRQ_POSTING_CAP)  ||
2326                 !kvm_vcpu_apicv_active(vcpu))
2327                 return;
2328
2329         /* Set SN when the vCPU is preempted */
2330         if (vcpu->preempted)
2331                 pi_set_sn(pi_desc);
2332 }
2333
2334 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
2335 {
2336         vmx_vcpu_pi_put(vcpu);
2337
2338         __vmx_load_host_state(to_vmx(vcpu));
2339 }
2340
2341 static bool emulation_required(struct kvm_vcpu *vcpu)
2342 {
2343         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
2344 }
2345
2346 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
2347
2348 /*
2349  * Return the cr0 value that a nested guest would read. This is a combination
2350  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
2351  * its hypervisor (cr0_read_shadow).
2352  */
2353 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
2354 {
2355         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
2356                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
2357 }
2358 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
2359 {
2360         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
2361                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
2362 }
2363
2364 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
2365 {
2366         unsigned long rflags, save_rflags;
2367
2368         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
2369                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2370                 rflags = vmcs_readl(GUEST_RFLAGS);
2371                 if (to_vmx(vcpu)->rmode.vm86_active) {
2372                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
2373                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
2374                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
2375                 }
2376                 to_vmx(vcpu)->rflags = rflags;
2377         }
2378         return to_vmx(vcpu)->rflags;
2379 }
2380
2381 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2382 {
2383         unsigned long old_rflags = vmx_get_rflags(vcpu);
2384
2385         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2386         to_vmx(vcpu)->rflags = rflags;
2387         if (to_vmx(vcpu)->rmode.vm86_active) {
2388                 to_vmx(vcpu)->rmode.save_rflags = rflags;
2389                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
2390         }
2391         vmcs_writel(GUEST_RFLAGS, rflags);
2392
2393         if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
2394                 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
2395 }
2396
2397 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
2398 {
2399         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2400         int ret = 0;
2401
2402         if (interruptibility & GUEST_INTR_STATE_STI)
2403                 ret |= KVM_X86_SHADOW_INT_STI;
2404         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
2405                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
2406
2407         return ret;
2408 }
2409
2410 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
2411 {
2412         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2413         u32 interruptibility = interruptibility_old;
2414
2415         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
2416
2417         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
2418                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
2419         else if (mask & KVM_X86_SHADOW_INT_STI)
2420                 interruptibility |= GUEST_INTR_STATE_STI;
2421
2422         if ((interruptibility != interruptibility_old))
2423                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
2424 }
2425
2426 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
2427 {
2428         unsigned long rip;
2429
2430         rip = kvm_rip_read(vcpu);
2431         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
2432         kvm_rip_write(vcpu, rip);
2433
2434         /* skipping an emulated instruction also counts */
2435         vmx_set_interrupt_shadow(vcpu, 0);
2436 }
2437
2438 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
2439                                                unsigned long exit_qual)
2440 {
2441         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2442         unsigned int nr = vcpu->arch.exception.nr;
2443         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2444
2445         if (vcpu->arch.exception.has_error_code) {
2446                 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
2447                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2448         }
2449
2450         if (kvm_exception_is_soft(nr))
2451                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2452         else
2453                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2454
2455         if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
2456             vmx_get_nmi_mask(vcpu))
2457                 intr_info |= INTR_INFO_UNBLOCK_NMI;
2458
2459         nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
2460 }
2461
2462 /*
2463  * KVM wants to inject page-faults which it got to the guest. This function
2464  * checks whether in a nested guest, we need to inject them to L1 or L2.
2465  */
2466 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
2467 {
2468         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2469         unsigned int nr = vcpu->arch.exception.nr;
2470
2471         if (nr == PF_VECTOR) {
2472                 if (vcpu->arch.exception.nested_apf) {
2473                         *exit_qual = vcpu->arch.apf.nested_apf_token;
2474                         return 1;
2475                 }
2476                 /*
2477                  * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
2478                  * The fix is to add the ancillary datum (CR2 or DR6) to structs
2479                  * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
2480                  * can be written only when inject_pending_event runs.  This should be
2481                  * conditional on a new capability---if the capability is disabled,
2482                  * kvm_multiple_exception would write the ancillary information to
2483                  * CR2 or DR6, for backwards ABI-compatibility.
2484                  */
2485                 if (nested_vmx_is_page_fault_vmexit(vmcs12,
2486                                                     vcpu->arch.exception.error_code)) {
2487                         *exit_qual = vcpu->arch.cr2;
2488                         return 1;
2489                 }
2490         } else {
2491                 if (vmcs12->exception_bitmap & (1u << nr)) {
2492                         if (nr == DB_VECTOR)
2493                                 *exit_qual = vcpu->arch.dr6;
2494                         else
2495                                 *exit_qual = 0;
2496                         return 1;
2497                 }
2498         }
2499
2500         return 0;
2501 }
2502
2503 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
2504 {
2505         struct vcpu_vmx *vmx = to_vmx(vcpu);
2506         unsigned nr = vcpu->arch.exception.nr;
2507         bool has_error_code = vcpu->arch.exception.has_error_code;
2508         u32 error_code = vcpu->arch.exception.error_code;
2509         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2510
2511         if (has_error_code) {
2512                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
2513                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2514         }
2515
2516         if (vmx->rmode.vm86_active) {
2517                 int inc_eip = 0;
2518                 if (kvm_exception_is_soft(nr))
2519                         inc_eip = vcpu->arch.event_exit_inst_len;
2520                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
2521                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2522                 return;
2523         }
2524
2525         if (kvm_exception_is_soft(nr)) {
2526                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2527                              vmx->vcpu.arch.event_exit_inst_len);
2528                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2529         } else
2530                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2531
2532         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
2533 }
2534
2535 static bool vmx_rdtscp_supported(void)
2536 {
2537         return cpu_has_vmx_rdtscp();
2538 }
2539
2540 static bool vmx_invpcid_supported(void)
2541 {
2542         return cpu_has_vmx_invpcid() && enable_ept;
2543 }
2544
2545 /*
2546  * Swap MSR entry in host/guest MSR entry array.
2547  */
2548 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
2549 {
2550         struct shared_msr_entry tmp;
2551
2552         tmp = vmx->guest_msrs[to];
2553         vmx->guest_msrs[to] = vmx->guest_msrs[from];
2554         vmx->guest_msrs[from] = tmp;
2555 }
2556
2557 static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
2558 {
2559         unsigned long *msr_bitmap;
2560
2561         if (is_guest_mode(vcpu))
2562                 msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
2563         else if (cpu_has_secondary_exec_ctrls() &&
2564                  (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
2565                   SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
2566                 if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) {
2567                         if (is_long_mode(vcpu))
2568                                 msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv;
2569                         else
2570                                 msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv;
2571                 } else {
2572                         if (is_long_mode(vcpu))
2573                                 msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
2574                         else
2575                                 msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
2576                 }
2577         } else {
2578                 if (is_long_mode(vcpu))
2579                         msr_bitmap = vmx_msr_bitmap_longmode;
2580                 else
2581                         msr_bitmap = vmx_msr_bitmap_legacy;
2582         }
2583
2584         vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
2585 }
2586
2587 /*
2588  * Set up the vmcs to automatically save and restore system
2589  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
2590  * mode, as fiddling with msrs is very expensive.
2591  */
2592 static void setup_msrs(struct vcpu_vmx *vmx)
2593 {
2594         int save_nmsrs, index;
2595
2596         save_nmsrs = 0;
2597 #ifdef CONFIG_X86_64
2598         if (is_long_mode(&vmx->vcpu)) {
2599                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
2600                 if (index >= 0)
2601                         move_msr_up(vmx, index, save_nmsrs++);
2602                 index = __find_msr_index(vmx, MSR_LSTAR);
2603                 if (index >= 0)
2604                         move_msr_up(vmx, index, save_nmsrs++);
2605                 index = __find_msr_index(vmx, MSR_CSTAR);
2606                 if (index >= 0)
2607                         move_msr_up(vmx, index, save_nmsrs++);
2608                 index = __find_msr_index(vmx, MSR_TSC_AUX);
2609                 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
2610                         move_msr_up(vmx, index, save_nmsrs++);
2611                 /*
2612                  * MSR_STAR is only needed on long mode guests, and only
2613                  * if efer.sce is enabled.
2614                  */
2615                 index = __find_msr_index(vmx, MSR_STAR);
2616                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
2617                         move_msr_up(vmx, index, save_nmsrs++);
2618         }
2619 #endif
2620         index = __find_msr_index(vmx, MSR_EFER);
2621         if (index >= 0 && update_transition_efer(vmx, index))
2622                 move_msr_up(vmx, index, save_nmsrs++);
2623
2624         vmx->save_nmsrs = save_nmsrs;
2625
2626         if (cpu_has_vmx_msr_bitmap())
2627                 vmx_set_msr_bitmap(&vmx->vcpu);
2628 }
2629
2630 /*
2631  * reads and returns guest's timestamp counter "register"
2632  * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
2633  * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
2634  */
2635 static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
2636 {
2637         u64 host_tsc, tsc_offset;
2638
2639         host_tsc = rdtsc();
2640         tsc_offset = vmcs_read64(TSC_OFFSET);
2641         return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
2642 }
2643
2644 /*
2645  * writes 'offset' into guest's timestamp counter offset register
2646  */
2647 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2648 {
2649         if (is_guest_mode(vcpu)) {
2650                 /*
2651                  * We're here if L1 chose not to trap WRMSR to TSC. According
2652                  * to the spec, this should set L1's TSC; The offset that L1
2653                  * set for L2 remains unchanged, and still needs to be added
2654                  * to the newly set TSC to get L2's TSC.
2655                  */
2656                 struct vmcs12 *vmcs12;
2657                 /* recalculate vmcs02.TSC_OFFSET: */
2658                 vmcs12 = get_vmcs12(vcpu);
2659                 vmcs_write64(TSC_OFFSET, offset +
2660                         (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
2661                          vmcs12->tsc_offset : 0));
2662         } else {
2663                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2664                                            vmcs_read64(TSC_OFFSET), offset);
2665                 vmcs_write64(TSC_OFFSET, offset);
2666         }
2667 }
2668
2669 /*
2670  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
2671  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
2672  * all guests if the "nested" module option is off, and can also be disabled
2673  * for a single guest by disabling its VMX cpuid bit.
2674  */
2675 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2676 {
2677         return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
2678 }
2679
2680 /*
2681  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
2682  * returned for the various VMX controls MSRs when nested VMX is enabled.
2683  * The same values should also be used to verify that vmcs12 control fields are
2684  * valid during nested entry from L1 to L2.
2685  * Each of these control msrs has a low and high 32-bit half: A low bit is on
2686  * if the corresponding bit in the (32-bit) control field *must* be on, and a
2687  * bit in the high half is on if the corresponding bit in the control field
2688  * may be on. See also vmx_control_verify().
2689  */
2690 static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2691 {
2692         /*
2693          * Note that as a general rule, the high half of the MSRs (bits in
2694          * the control fields which may be 1) should be initialized by the
2695          * intersection of the underlying hardware's MSR (i.e., features which
2696          * can be supported) and the list of features we want to expose -
2697          * because they are known to be properly supported in our code.
2698          * Also, usually, the low half of the MSRs (bits which must be 1) can
2699          * be set to 0, meaning that L1 may turn off any of these bits. The
2700          * reason is that if one of these bits is necessary, it will appear
2701          * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
2702          * fields of vmcs01 and vmcs02, will turn these bits off - and
2703          * nested_vmx_exit_reflected() will not pass related exits to L1.
2704          * These rules have exceptions below.
2705          */
2706
2707         /* pin-based controls */
2708         rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
2709                 vmx->nested.nested_vmx_pinbased_ctls_low,
2710                 vmx->nested.nested_vmx_pinbased_ctls_high);
2711         vmx->nested.nested_vmx_pinbased_ctls_low |=
2712                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2713         vmx->nested.nested_vmx_pinbased_ctls_high &=
2714                 PIN_BASED_EXT_INTR_MASK |
2715                 PIN_BASED_NMI_EXITING |
2716                 PIN_BASED_VIRTUAL_NMIS;
2717         vmx->nested.nested_vmx_pinbased_ctls_high |=
2718                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2719                 PIN_BASED_VMX_PREEMPTION_TIMER;
2720         if (kvm_vcpu_apicv_active(&vmx->vcpu))
2721                 vmx->nested.nested_vmx_pinbased_ctls_high |=
2722                         PIN_BASED_POSTED_INTR;
2723
2724         /* exit controls */
2725         rdmsr(MSR_IA32_VMX_EXIT_CTLS,
2726                 vmx->nested.nested_vmx_exit_ctls_low,
2727                 vmx->nested.nested_vmx_exit_ctls_high);
2728         vmx->nested.nested_vmx_exit_ctls_low =
2729                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
2730
2731         vmx->nested.nested_vmx_exit_ctls_high &=
2732 #ifdef CONFIG_X86_64
2733                 VM_EXIT_HOST_ADDR_SPACE_SIZE |
2734 #endif
2735                 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
2736         vmx->nested.nested_vmx_exit_ctls_high |=
2737                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
2738                 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
2739                 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
2740
2741         if (kvm_mpx_supported())
2742                 vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
2743
2744         /* We support free control of debug control saving. */
2745         vmx->nested.nested_vmx_exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
2746
2747         /* entry controls */
2748         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
2749                 vmx->nested.nested_vmx_entry_ctls_low,
2750                 vmx->nested.nested_vmx_entry_ctls_high);
2751         vmx->nested.nested_vmx_entry_ctls_low =
2752                 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
2753         vmx->nested.nested_vmx_entry_ctls_high &=
2754 #ifdef CONFIG_X86_64
2755                 VM_ENTRY_IA32E_MODE |
2756 #endif
2757                 VM_ENTRY_LOAD_IA32_PAT;
2758         vmx->nested.nested_vmx_entry_ctls_high |=
2759                 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
2760         if (kvm_mpx_supported())
2761                 vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
2762
2763         /* We support free control of debug control loading. */
2764         vmx->nested.nested_vmx_entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
2765
2766         /* cpu-based controls */
2767         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
2768                 vmx->nested.nested_vmx_procbased_ctls_low,
2769                 vmx->nested.nested_vmx_procbased_ctls_high);
2770         vmx->nested.nested_vmx_procbased_ctls_low =
2771                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2772         vmx->nested.nested_vmx_procbased_ctls_high &=
2773                 CPU_BASED_VIRTUAL_INTR_PENDING |
2774                 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
2775                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
2776                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
2777                 CPU_BASED_CR3_STORE_EXITING |
2778 #ifdef CONFIG_X86_64
2779                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
2780 #endif
2781                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
2782                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
2783                 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
2784                 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
2785                 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2786         /*
2787          * We can allow some features even when not supported by the
2788          * hardware. For example, L1 can specify an MSR bitmap - and we
2789          * can use it to avoid exits to L1 - even when L0 runs L2
2790          * without MSR bitmaps.
2791          */
2792         vmx->nested.nested_vmx_procbased_ctls_high |=
2793                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2794                 CPU_BASED_USE_MSR_BITMAPS;
2795
2796         /* We support free control of CR3 access interception. */
2797         vmx->nested.nested_vmx_procbased_ctls_low &=
2798                 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
2799
2800         /*
2801          * secondary cpu-based controls.  Do not include those that
2802          * depend on CPUID bits, they are added later by vmx_cpuid_update.
2803          */
2804         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
2805                 vmx->nested.nested_vmx_secondary_ctls_low,
2806                 vmx->nested.nested_vmx_secondary_ctls_high);
2807         vmx->nested.nested_vmx_secondary_ctls_low = 0;
2808         vmx->nested.nested_vmx_secondary_ctls_high &=
2809                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2810                 SECONDARY_EXEC_DESC |
2811                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2812                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2813                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2814                 SECONDARY_EXEC_WBINVD_EXITING;
2815
2816         if (enable_ept) {
2817                 /* nested EPT: emulate EPT also to L1 */
2818                 vmx->nested.nested_vmx_secondary_ctls_high |=
2819                         SECONDARY_EXEC_ENABLE_EPT;
2820                 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2821                          VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
2822                 if (cpu_has_vmx_ept_execute_only())
2823                         vmx->nested.nested_vmx_ept_caps |=
2824                                 VMX_EPT_EXECUTE_ONLY_BIT;
2825                 vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
2826                 vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
2827                         VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
2828                         VMX_EPT_1GB_PAGE_BIT;
2829                 if (enable_ept_ad_bits) {
2830                         vmx->nested.nested_vmx_secondary_ctls_high |=
2831                                 SECONDARY_EXEC_ENABLE_PML;
2832                         vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
2833                 }
2834         } else
2835                 vmx->nested.nested_vmx_ept_caps = 0;
2836
2837         if (cpu_has_vmx_vmfunc()) {
2838                 vmx->nested.nested_vmx_secondary_ctls_high |=
2839                         SECONDARY_EXEC_ENABLE_VMFUNC;
2840                 /*
2841                  * Advertise EPTP switching unconditionally
2842                  * since we emulate it
2843                  */
2844                 vmx->nested.nested_vmx_vmfunc_controls =
2845                         VMX_VMFUNC_EPTP_SWITCHING;
2846         }
2847
2848         /*
2849          * Old versions of KVM use the single-context version without
2850          * checking for support, so declare that it is supported even
2851          * though it is treated as global context.  The alternative is
2852          * not failing the single-context invvpid, and it is worse.
2853          */
2854         if (enable_vpid) {
2855                 vmx->nested.nested_vmx_secondary_ctls_high |=
2856                         SECONDARY_EXEC_ENABLE_VPID;
2857                 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
2858                         VMX_VPID_EXTENT_SUPPORTED_MASK;
2859         } else
2860                 vmx->nested.nested_vmx_vpid_caps = 0;
2861
2862         if (enable_unrestricted_guest)
2863                 vmx->nested.nested_vmx_secondary_ctls_high |=
2864                         SECONDARY_EXEC_UNRESTRICTED_GUEST;
2865
2866         /* miscellaneous data */
2867         rdmsr(MSR_IA32_VMX_MISC,
2868                 vmx->nested.nested_vmx_misc_low,
2869                 vmx->nested.nested_vmx_misc_high);
2870         vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
2871         vmx->nested.nested_vmx_misc_low |=
2872                 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
2873                 VMX_MISC_ACTIVITY_HLT;
2874         vmx->nested.nested_vmx_misc_high = 0;
2875
2876         /*
2877          * This MSR reports some information about VMX support. We
2878          * should return information about the VMX we emulate for the
2879          * guest, and the VMCS structure we give it - not about the
2880          * VMX support of the underlying hardware.
2881          */
2882         vmx->nested.nested_vmx_basic =
2883                 VMCS12_REVISION |
2884                 VMX_BASIC_TRUE_CTLS |
2885                 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
2886                 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
2887
2888         if (cpu_has_vmx_basic_inout())
2889                 vmx->nested.nested_vmx_basic |= VMX_BASIC_INOUT;
2890
2891         /*
2892          * These MSRs specify bits which the guest must keep fixed on
2893          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
2894          * We picked the standard core2 setting.
2895          */
2896 #define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
2897 #define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
2898         vmx->nested.nested_vmx_cr0_fixed0 = VMXON_CR0_ALWAYSON;
2899         vmx->nested.nested_vmx_cr4_fixed0 = VMXON_CR4_ALWAYSON;
2900
2901         /* These MSRs specify bits which the guest must keep fixed off. */
2902         rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx->nested.nested_vmx_cr0_fixed1);
2903         rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1);
2904
2905         /* highest index: VMX_PREEMPTION_TIMER_VALUE */
2906         vmx->nested.nested_vmx_vmcs_enum = 0x2e;
2907 }
2908
2909 /*
2910  * if fixed0[i] == 1: val[i] must be 1
2911  * if fixed1[i] == 0: val[i] must be 0
2912  */
2913 static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
2914 {
2915         return ((val & fixed1) | fixed0) == val;
2916 }
2917
2918 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
2919 {
2920         return fixed_bits_valid(control, low, high);
2921 }
2922
2923 static inline u64 vmx_control_msr(u32 low, u32 high)
2924 {
2925         return low | ((u64)high << 32);
2926 }
2927
2928 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
2929 {
2930         superset &= mask;
2931         subset &= mask;
2932
2933         return (superset | subset) == superset;
2934 }
2935
2936 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
2937 {
2938         const u64 feature_and_reserved =
2939                 /* feature (except bit 48; see below) */
2940                 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
2941                 /* reserved */
2942                 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
2943         u64 vmx_basic = vmx->nested.nested_vmx_basic;
2944
2945         if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
2946                 return -EINVAL;
2947
2948         /*
2949          * KVM does not emulate a version of VMX that constrains physical
2950          * addresses of VMX structures (e.g. VMCS) to 32-bits.
2951          */
2952         if (data & BIT_ULL(48))
2953                 return -EINVAL;
2954
2955         if (vmx_basic_vmcs_revision_id(vmx_basic) !=
2956             vmx_basic_vmcs_revision_id(data))
2957                 return -EINVAL;
2958
2959         if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
2960                 return -EINVAL;
2961
2962         vmx->nested.nested_vmx_basic = data;
2963         return 0;
2964 }
2965
2966 static int
2967 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
2968 {
2969         u64 supported;
2970         u32 *lowp, *highp;
2971
2972         switch (msr_index) {
2973         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2974                 lowp = &vmx->nested.nested_vmx_pinbased_ctls_low;
2975                 highp = &vmx->nested.nested_vmx_pinbased_ctls_high;
2976                 break;
2977         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2978                 lowp = &vmx->nested.nested_vmx_procbased_ctls_low;
2979                 highp = &vmx->nested.nested_vmx_procbased_ctls_high;
2980                 break;
2981         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2982                 lowp = &vmx->nested.nested_vmx_exit_ctls_low;
2983                 highp = &vmx->nested.nested_vmx_exit_ctls_high;
2984                 break;
2985         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2986                 lowp = &vmx->nested.nested_vmx_entry_ctls_low;
2987                 highp = &vmx->nested.nested_vmx_entry_ctls_high;
2988                 break;
2989         case MSR_IA32_VMX_PROCBASED_CTLS2:
2990                 lowp = &vmx->nested.nested_vmx_secondary_ctls_low;
2991                 highp = &vmx->nested.nested_vmx_secondary_ctls_high;
2992                 break;
2993         default:
2994                 BUG();
2995         }
2996
2997         supported = vmx_control_msr(*lowp, *highp);
2998
2999         /* Check must-be-1 bits are still 1. */
3000         if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
3001                 return -EINVAL;
3002
3003         /* Check must-be-0 bits are still 0. */
3004         if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
3005                 return -EINVAL;
3006
3007         *lowp = data;
3008         *highp = data >> 32;
3009         return 0;
3010 }
3011
3012 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
3013 {
3014         const u64 feature_and_reserved_bits =
3015                 /* feature */
3016                 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
3017                 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
3018                 /* reserved */
3019                 GENMASK_ULL(13, 9) | BIT_ULL(31);
3020         u64 vmx_misc;
3021
3022         vmx_misc = vmx_control_msr(vmx->nested.nested_vmx_misc_low,
3023                                    vmx->nested.nested_vmx_misc_high);
3024
3025         if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
3026                 return -EINVAL;
3027
3028         if ((vmx->nested.nested_vmx_pinbased_ctls_high &
3029              PIN_BASED_VMX_PREEMPTION_TIMER) &&
3030             vmx_misc_preemption_timer_rate(data) !=
3031             vmx_misc_preemption_timer_rate(vmx_misc))
3032                 return -EINVAL;
3033
3034         if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
3035                 return -EINVAL;
3036
3037         if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
3038                 return -EINVAL;
3039
3040         if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
3041                 return -EINVAL;
3042
3043         vmx->nested.nested_vmx_misc_low = data;
3044         vmx->nested.nested_vmx_misc_high = data >> 32;
3045         return 0;
3046 }
3047
3048 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
3049 {
3050         u64 vmx_ept_vpid_cap;
3051
3052         vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.nested_vmx_ept_caps,
3053                                            vmx->nested.nested_vmx_vpid_caps);
3054
3055         /* Every bit is either reserved or a feature bit. */
3056         if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
3057                 return -EINVAL;
3058
3059         vmx->nested.nested_vmx_ept_caps = data;
3060         vmx->nested.nested_vmx_vpid_caps = data >> 32;
3061         return 0;
3062 }
3063
3064 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3065 {
3066         u64 *msr;
3067
3068         switch (msr_index) {
3069         case MSR_IA32_VMX_CR0_FIXED0:
3070                 msr = &vmx->nested.nested_vmx_cr0_fixed0;
3071                 break;
3072         case MSR_IA32_VMX_CR4_FIXED0:
3073                 msr = &vmx->nested.nested_vmx_cr4_fixed0;
3074                 break;
3075         default:
3076                 BUG();
3077         }
3078
3079         /*
3080          * 1 bits (which indicates bits which "must-be-1" during VMX operation)
3081          * must be 1 in the restored value.
3082          */
3083         if (!is_bitwise_subset(data, *msr, -1ULL))
3084                 return -EINVAL;
3085
3086         *msr = data;
3087         return 0;
3088 }
3089
3090 /*
3091  * Called when userspace is restoring VMX MSRs.
3092  *
3093  * Returns 0 on success, non-0 otherwise.
3094  */
3095 static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
3096 {
3097         struct vcpu_vmx *vmx = to_vmx(vcpu);
3098
3099         switch (msr_index) {
3100         case MSR_IA32_VMX_BASIC:
3101                 return vmx_restore_vmx_basic(vmx, data);
3102         case MSR_IA32_VMX_PINBASED_CTLS:
3103         case MSR_IA32_VMX_PROCBASED_CTLS:
3104         case MSR_IA32_VMX_EXIT_CTLS:
3105         case MSR_IA32_VMX_ENTRY_CTLS:
3106                 /*
3107                  * The "non-true" VMX capability MSRs are generated from the
3108                  * "true" MSRs, so we do not support restoring them directly.
3109                  *
3110                  * If userspace wants to emulate VMX_BASIC[55]=0, userspace
3111                  * should restore the "true" MSRs with the must-be-1 bits
3112                  * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
3113                  * DEFAULT SETTINGS".
3114                  */
3115                 return -EINVAL;
3116         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3117         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3118         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3119         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3120         case MSR_IA32_VMX_PROCBASED_CTLS2:
3121                 return vmx_restore_control_msr(vmx, msr_index, data);
3122         case MSR_IA32_VMX_MISC:
3123                 return vmx_restore_vmx_misc(vmx, data);
3124         case MSR_IA32_VMX_CR0_FIXED0:
3125         case MSR_IA32_VMX_CR4_FIXED0:
3126                 return vmx_restore_fixed0_msr(vmx, msr_index, data);
3127         case MSR_IA32_VMX_CR0_FIXED1:
3128         case MSR_IA32_VMX_CR4_FIXED1:
3129                 /*
3130                  * These MSRs are generated based on the vCPU's CPUID, so we
3131                  * do not support restoring them directly.
3132                  */
3133                 return -EINVAL;
3134         case MSR_IA32_VMX_EPT_VPID_CAP:
3135                 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
3136         case MSR_IA32_VMX_VMCS_ENUM:
3137                 vmx->nested.nested_vmx_vmcs_enum = data;
3138                 return 0;
3139         default:
3140                 /*
3141                  * The rest of the VMX capability MSRs do not support restore.
3142                  */
3143                 return -EINVAL;
3144         }
3145 }
3146
3147 /* Returns 0 on success, non-0 otherwise. */
3148 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
3149 {
3150         struct vcpu_vmx *vmx = to_vmx(vcpu);
3151
3152         switch (msr_index) {
3153         case MSR_IA32_VMX_BASIC:
3154                 *pdata = vmx->nested.nested_vmx_basic;
3155                 break;
3156         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3157         case MSR_IA32_VMX_PINBASED_CTLS:
3158                 *pdata = vmx_control_msr(
3159                         vmx->nested.nested_vmx_pinbased_ctls_low,
3160                         vmx->nested.nested_vmx_pinbased_ctls_high);
3161                 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
3162                         *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3163                 break;
3164         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3165         case MSR_IA32_VMX_PROCBASED_CTLS:
3166                 *pdata = vmx_control_msr(
3167                         vmx->nested.nested_vmx_procbased_ctls_low,
3168                         vmx->nested.nested_vmx_procbased_ctls_high);
3169                 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
3170                         *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3171                 break;
3172         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3173         case MSR_IA32_VMX_EXIT_CTLS:
3174                 *pdata = vmx_control_msr(
3175                         vmx->nested.nested_vmx_exit_ctls_low,
3176                         vmx->nested.nested_vmx_exit_ctls_high);
3177                 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
3178                         *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3179                 break;
3180         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3181         case MSR_IA32_VMX_ENTRY_CTLS:
3182                 *pdata = vmx_control_msr(
3183                         vmx->nested.nested_vmx_entry_ctls_low,
3184                         vmx->nested.nested_vmx_entry_ctls_high);
3185                 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
3186                         *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3187                 break;
3188         case MSR_IA32_VMX_MISC:
3189                 *pdata = vmx_control_msr(
3190                         vmx->nested.nested_vmx_misc_low,
3191                         vmx->nested.nested_vmx_misc_high);
3192                 break;
3193         case MSR_IA32_VMX_CR0_FIXED0:
3194                 *pdata = vmx->nested.nested_vmx_cr0_fixed0;
3195                 break;
3196         case MSR_IA32_VMX_CR0_FIXED1:
3197                 *pdata = vmx->nested.nested_vmx_cr0_fixed1;
3198                 break;
3199         case MSR_IA32_VMX_CR4_FIXED0:
3200                 *pdata = vmx->nested.nested_vmx_cr4_fixed0;
3201                 break;
3202         case MSR_IA32_VMX_CR4_FIXED1:
3203                 *pdata = vmx->nested.nested_vmx_cr4_fixed1;
3204                 break;
3205         case MSR_IA32_VMX_VMCS_ENUM:
3206                 *pdata = vmx->nested.nested_vmx_vmcs_enum;
3207                 break;
3208         case MSR_IA32_VMX_PROCBASED_CTLS2:
3209                 *pdata = vmx_control_msr(
3210                         vmx->nested.nested_vmx_secondary_ctls_low,
3211                         vmx->nested.nested_vmx_secondary_ctls_high);
3212                 break;
3213         case MSR_IA32_VMX_EPT_VPID_CAP:
3214                 *pdata = vmx->nested.nested_vmx_ept_caps |
3215                         ((u64)vmx->nested.nested_vmx_vpid_caps << 32);
3216                 break;
3217         case MSR_IA32_VMX_VMFUNC:
3218                 *pdata = vmx->nested.nested_vmx_vmfunc_controls;
3219                 break;
3220         default:
3221                 return 1;
3222         }
3223
3224         return 0;
3225 }
3226
3227 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
3228                                                  uint64_t val)
3229 {
3230         uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
3231
3232         return !(val & ~valid_bits);
3233 }
3234
3235 /*
3236  * Reads an msr value (of 'msr_index') into 'pdata'.
3237  * Returns 0 on success, non-0 otherwise.
3238  * Assumes vcpu_load() was already called.
3239  */
3240 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3241 {
3242         struct shared_msr_entry *msr;
3243
3244         switch (msr_info->index) {
3245 #ifdef CONFIG_X86_64
3246         case MSR_FS_BASE:
3247                 msr_info->data = vmcs_readl(GUEST_FS_BASE);
3248                 break;
3249         case MSR_GS_BASE:
3250                 msr_info->data = vmcs_readl(GUEST_GS_BASE);
3251                 break;
3252         case MSR_KERNEL_GS_BASE:
3253                 vmx_load_host_state(to_vmx(vcpu));
3254                 msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
3255                 break;
3256 #endif
3257         case MSR_EFER:
3258                 return kvm_get_msr_common(vcpu, msr_info);
3259         case MSR_IA32_TSC:
3260                 msr_info->data = guest_read_tsc(vcpu);
3261                 break;
3262         case MSR_IA32_SYSENTER_CS:
3263                 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
3264                 break;
3265         case MSR_IA32_SYSENTER_EIP:
3266                 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
3267                 break;
3268         case MSR_IA32_SYSENTER_ESP:
3269                 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
3270                 break;
3271         case MSR_IA32_BNDCFGS:
3272                 if (!kvm_mpx_supported() ||
3273                     (!msr_info->host_initiated &&
3274                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3275                         return 1;
3276                 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
3277                 break;
3278         case MSR_IA32_MCG_EXT_CTL:
3279                 if (!msr_info->host_initiated &&
3280                     !(to_vmx(vcpu)->msr_ia32_feature_control &
3281                       FEATURE_CONTROL_LMCE))
3282                         return 1;
3283                 msr_info->data = vcpu->arch.mcg_ext_ctl;
3284                 break;
3285         case MSR_IA32_FEATURE_CONTROL:
3286                 msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
3287                 break;
3288         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3289                 if (!nested_vmx_allowed(vcpu))
3290                         return 1;
3291                 return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
3292         case MSR_IA32_XSS:
3293                 if (!vmx_xsaves_supported())
3294                         return 1;
3295                 msr_info->data = vcpu->arch.ia32_xss;
3296                 break;
3297         case MSR_TSC_AUX:
3298                 if (!msr_info->host_initiated &&
3299                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
3300                         return 1;
3301                 /* Otherwise falls through */
3302         default:
3303                 msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
3304                 if (msr) {
3305                         msr_info->data = msr->data;
3306                         break;
3307                 }
3308                 return kvm_get_msr_common(vcpu, msr_info);
3309         }
3310
3311         return 0;
3312 }
3313
3314 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
3315
3316 /*
3317  * Writes msr value into into the appropriate "register".
3318  * Returns 0 on success, non-0 otherwise.
3319  * Assumes vcpu_load() was already called.
3320  */
3321 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3322 {
3323         struct vcpu_vmx *vmx = to_vmx(vcpu);
3324         struct shared_msr_entry *msr;
3325         int ret = 0;
3326         u32 msr_index = msr_info->index;
3327         u64 data = msr_info->data;
3328
3329         switch (msr_index) {
3330         case MSR_EFER:
3331                 ret = kvm_set_msr_common(vcpu, msr_info);
3332                 break;
3333 #ifdef CONFIG_X86_64
3334         case MSR_FS_BASE:
3335                 vmx_segment_cache_clear(vmx);
3336                 vmcs_writel(GUEST_FS_BASE, data);
3337                 break;
3338         case MSR_GS_BASE:
3339                 vmx_segment_cache_clear(vmx);
3340                 vmcs_writel(GUEST_GS_BASE, data);
3341                 break;
3342         case MSR_KERNEL_GS_BASE:
3343                 vmx_load_host_state(vmx);
3344                 vmx->msr_guest_kernel_gs_base = data;
3345                 break;
3346 #endif
3347         case MSR_IA32_SYSENTER_CS:
3348                 vmcs_write32(GUEST_SYSENTER_CS, data);
3349                 break;
3350         case MSR_IA32_SYSENTER_EIP:
3351                 vmcs_writel(GUEST_SYSENTER_EIP, data);
3352                 break;
3353         case MSR_IA32_SYSENTER_ESP:
3354                 vmcs_writel(GUEST_SYSENTER_ESP, data);
3355                 break;
3356         case MSR_IA32_BNDCFGS:
3357                 if (!kvm_mpx_supported() ||
3358                     (!msr_info->host_initiated &&
3359                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3360                         return 1;
3361                 if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
3362                     (data & MSR_IA32_BNDCFGS_RSVD))
3363                         return 1;
3364                 vmcs_write64(GUEST_BNDCFGS, data);
3365                 break;
3366         case MSR_IA32_TSC:
3367                 kvm_write_tsc(vcpu, msr_info);
3368                 break;
3369         case MSR_IA32_CR_PAT:
3370                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
3371                         if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3372                                 return 1;
3373                         vmcs_write64(GUEST_IA32_PAT, data);
3374                         vcpu->arch.pat = data;
3375                         break;
3376                 }
3377                 ret = kvm_set_msr_common(vcpu, msr_info);
3378                 break;
3379         case MSR_IA32_TSC_ADJUST:
3380                 ret = kvm_set_msr_common(vcpu, msr_info);
3381                 break;
3382         case MSR_IA32_MCG_EXT_CTL:
3383                 if ((!msr_info->host_initiated &&
3384                      !(to_vmx(vcpu)->msr_ia32_feature_control &
3385                        FEATURE_CONTROL_LMCE)) ||
3386                     (data & ~MCG_EXT_CTL_LMCE_EN))
3387                         return 1;
3388                 vcpu->arch.mcg_ext_ctl = data;
3389                 break;
3390         case MSR_IA32_FEATURE_CONTROL:
3391                 if (!vmx_feature_control_msr_valid(vcpu, data) ||
3392                     (to_vmx(vcpu)->msr_ia32_feature_control &
3393                      FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
3394                         return 1;
3395                 vmx->msr_ia32_feature_control = data;
3396                 if (msr_info->host_initiated && data == 0)