1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22 #include "lapic.h"
23
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include <linux/frame.h>
37 #include <linux/nospec.h>
38 #include "kvm_cache_regs.h"
39 #include "x86.h"
40
41 #include <asm/cpu.h>
42 #include <asm/io.h>
43 #include <asm/desc.h>
44 #include <asm/vmx.h>
45 #include <asm/virtext.h>
46 #include <asm/mce.h>
47 #include <asm/fpu/internal.h>
48 #include <asm/perf_event.h>
49 #include <asm/debugreg.h>
50 #include <asm/kexec.h>
51 #include <asm/apic.h>
52 #include <asm/irq_remapping.h>
53 #include <asm/mmu_context.h>
54 #include <asm/nospec-branch.h>
55
56 #include "trace.h"
57 #include "pmu.h"
58
59 #define __ex(x) __kvm_handle_fault_on_reboot(x)
60 #define __ex_clear(x, reg) \
61         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
62
63 MODULE_AUTHOR("Qumranet");
64 MODULE_LICENSE("GPL");
65
66 static const struct x86_cpu_id vmx_cpu_id[] = {
67         X86_FEATURE_MATCH(X86_FEATURE_VMX),
68         {}
69 };
70 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
71
72 static bool __read_mostly enable_vpid = 1;
73 module_param_named(vpid, enable_vpid, bool, 0444);
74
75 static bool __read_mostly enable_vnmi = 1;
76 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
77
78 static bool __read_mostly flexpriority_enabled = 1;
79 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
80
81 static bool __read_mostly enable_ept = 1;
82 module_param_named(ept, enable_ept, bool, S_IRUGO);
83
84 static bool __read_mostly enable_unrestricted_guest = 1;
85 module_param_named(unrestricted_guest,
86                         enable_unrestricted_guest, bool, S_IRUGO);
87
88 static bool __read_mostly enable_ept_ad_bits = 1;
89 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
90
91 static bool __read_mostly emulate_invalid_guest_state = true;
92 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
93
94 static bool __read_mostly fasteoi = 1;
95 module_param(fasteoi, bool, S_IRUGO);
96
97 static bool __read_mostly enable_apicv = 1;
98 module_param(enable_apicv, bool, S_IRUGO);
99
100 static bool __read_mostly enable_shadow_vmcs = 1;
101 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
102 /*
103  * If nested=1, nested virtualization is supported, i.e., guests may use
104  * VMX and be hypervisors for their own guests. If nested=0, guests may not
105  * use VMX instructions.
106  */
107 static bool __read_mostly nested = 0;
108 module_param(nested, bool, S_IRUGO);
109
110 static u64 __read_mostly host_xss;
111
112 static bool __read_mostly enable_pml = 1;
113 module_param_named(pml, enable_pml, bool, S_IRUGO);
114
115 #define MSR_TYPE_R      1
116 #define MSR_TYPE_W      2
117 #define MSR_TYPE_RW     3
118
119 #define MSR_BITMAP_MODE_X2APIC          1
120 #define MSR_BITMAP_MODE_X2APIC_APICV    2
121 #define MSR_BITMAP_MODE_LM              4
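
/*
 * Illustrative sketch, not part of the driver: the mode flags above are OR'ed
 * into a single byte describing which flavour of MSR bitmap a vCPU needs.
 * The helper below is hypothetical and takes the relevant predicates as plain
 * booleans so that it stays self-contained.
 */
static inline u8 example_msr_bitmap_mode(bool long_mode, bool x2apic,
					 bool apicv_active)
{
	u8 mode = 0;

	if (long_mode)
		mode |= MSR_BITMAP_MODE_LM;
	if (x2apic) {
		mode |= MSR_BITMAP_MODE_X2APIC;
		if (apicv_active)
			mode |= MSR_BITMAP_MODE_X2APIC_APICV;
	}
	return mode;
}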
122
123 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
124
125 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
126 static int __read_mostly cpu_preemption_timer_multi;
127 static bool __read_mostly enable_preemption_timer = 1;
128 #ifdef CONFIG_X86_64
129 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
130 #endif
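
/*
 * Illustrative sketch, not part of the driver: the VMX preemption timer counts
 * down at the TSC rate divided by 2^N, where N is advertised in
 * MSR_IA32_VMX_MISC[4:0] and cached in cpu_preemption_timer_multi above, so
 * converting a guest TSC delta into a timer value is a right shift.  The
 * helper name is hypothetical.
 */
static inline u64 example_tsc_delta_to_preemption_timer(u64 delta_tsc)
{
	return delta_tsc >> cpu_preemption_timer_multi;
}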
131
132 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
133 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
134 #define KVM_VM_CR0_ALWAYS_ON                                            \
135         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
136 #define KVM_CR4_GUEST_OWNED_BITS                                      \
137         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
138          | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
139
140 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
141 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
142
143 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
144
145 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
146
147 /*
148  * Hyper-V requires all of these, so mark them as supported even though
149  * they are just treated the same as all-context.
150  */
151 #define VMX_VPID_EXTENT_SUPPORTED_MASK          \
152         (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
153         VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
154         VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
155         VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
156
157 /*
158  * These two parameters are used to configure the Pause-Loop Exiting controls:
159  * ple_gap:    upper bound on the amount of time between two successive
160  *             executions of PAUSE in a loop. A non-zero value also indicates
161  *             that PLE is enabled; this time is usually under 128 cycles.
162  * ple_window: upper bound on the amount of time a guest is allowed to execute
163  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
164  *             less than 2^12 cycles.
165  * Time is measured on a counter that runs at the same rate as the TSC; see
166  * SDM volume 3B, sections 21.6.13 and 22.1.3.
167  */
168 #define KVM_VMX_DEFAULT_PLE_GAP           128
169 #define KVM_VMX_DEFAULT_PLE_WINDOW        4096
170 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW   2
171 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0
172 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX    \
173                 INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW
174
175 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
176 module_param(ple_gap, int, S_IRUGO);
177
178 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
179 module_param(ple_window, int, S_IRUGO);
180
181 /* Default doubles per-vcpu window every exit. */
182 static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
183 module_param(ple_window_grow, int, S_IRUGO);
184
185 /* Default resets per-vcpu window every exit to ple_window. */
186 static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
187 module_param(ple_window_shrink, int, S_IRUGO);
188
189 /* Default is to compute the maximum so we can never overflow. */
190 static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
191 static int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
192 module_param(ple_window_max, int, S_IRUGO);
193
194 extern const ulong vmx_return;
195
196 #define NR_AUTOLOAD_MSRS 8
197
198 struct vmcs {
199         u32 revision_id;
200         u32 abort;
201         char data[0];
202 };
203
204 /*
205  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
206  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
207  * loaded on this CPU (so we can clear them if the CPU goes down).
208  */
209 struct loaded_vmcs {
210         struct vmcs *vmcs;
211         struct vmcs *shadow_vmcs;
212         int cpu;
213         bool launched;
214         bool nmi_known_unmasked;
215         unsigned long vmcs_host_cr3;    /* May not match real cr3 */
216         unsigned long vmcs_host_cr4;    /* May not match real cr4 */
217         /* Support for vnmi-less CPUs */
218         int soft_vnmi_blocked;
219         ktime_t entry_time;
220         s64 vnmi_blocked_time;
221         unsigned long *msr_bitmap;
222         struct list_head loaded_vmcss_on_cpu_link;
223 };
224
225 struct shared_msr_entry {
226         unsigned index;
227         u64 data;
228         u64 mask;
229 };
230
231 /*
232  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
233  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
234  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
235  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
236  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
237  * More than one of these structures may exist, if L1 runs multiple L2 guests.
238  * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
239  * underlying hardware which will be used to run L2.
240  * This structure is packed to ensure that its layout is identical across
241  * machines (necessary for live migration).
242  * If there are changes in this struct, VMCS12_REVISION must be changed.
243  */
244 typedef u64 natural_width;
245 struct __packed vmcs12 {
246         /* According to the Intel spec, a VMCS region must start with the
247          * following two fields. Then follow implementation-specific data.
248          */
249         u32 revision_id;
250         u32 abort;
251
252         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
253         u32 padding[7]; /* room for future expansion */
254
255         u64 io_bitmap_a;
256         u64 io_bitmap_b;
257         u64 msr_bitmap;
258         u64 vm_exit_msr_store_addr;
259         u64 vm_exit_msr_load_addr;
260         u64 vm_entry_msr_load_addr;
261         u64 tsc_offset;
262         u64 virtual_apic_page_addr;
263         u64 apic_access_addr;
264         u64 posted_intr_desc_addr;
265         u64 vm_function_control;
266         u64 ept_pointer;
267         u64 eoi_exit_bitmap0;
268         u64 eoi_exit_bitmap1;
269         u64 eoi_exit_bitmap2;
270         u64 eoi_exit_bitmap3;
271         u64 eptp_list_address;
272         u64 xss_exit_bitmap;
273         u64 guest_physical_address;
274         u64 vmcs_link_pointer;
275         u64 pml_address;
276         u64 guest_ia32_debugctl;
277         u64 guest_ia32_pat;
278         u64 guest_ia32_efer;
279         u64 guest_ia32_perf_global_ctrl;
280         u64 guest_pdptr0;
281         u64 guest_pdptr1;
282         u64 guest_pdptr2;
283         u64 guest_pdptr3;
284         u64 guest_bndcfgs;
285         u64 host_ia32_pat;
286         u64 host_ia32_efer;
287         u64 host_ia32_perf_global_ctrl;
288         u64 padding64[8]; /* room for future expansion */
289         /*
290          * To allow migration of L1 (complete with its L2 guests) between
291          * machines of different natural widths (32 or 64 bit), we cannot have
292          * unsigned long fields with no explicit size. We use u64 (aliased
293          * natural_width) instead. Luckily, x86 is little-endian.
294          */
295         natural_width cr0_guest_host_mask;
296         natural_width cr4_guest_host_mask;
297         natural_width cr0_read_shadow;
298         natural_width cr4_read_shadow;
299         natural_width cr3_target_value0;
300         natural_width cr3_target_value1;
301         natural_width cr3_target_value2;
302         natural_width cr3_target_value3;
303         natural_width exit_qualification;
304         natural_width guest_linear_address;
305         natural_width guest_cr0;
306         natural_width guest_cr3;
307         natural_width guest_cr4;
308         natural_width guest_es_base;
309         natural_width guest_cs_base;
310         natural_width guest_ss_base;
311         natural_width guest_ds_base;
312         natural_width guest_fs_base;
313         natural_width guest_gs_base;
314         natural_width guest_ldtr_base;
315         natural_width guest_tr_base;
316         natural_width guest_gdtr_base;
317         natural_width guest_idtr_base;
318         natural_width guest_dr7;
319         natural_width guest_rsp;
320         natural_width guest_rip;
321         natural_width guest_rflags;
322         natural_width guest_pending_dbg_exceptions;
323         natural_width guest_sysenter_esp;
324         natural_width guest_sysenter_eip;
325         natural_width host_cr0;
326         natural_width host_cr3;
327         natural_width host_cr4;
328         natural_width host_fs_base;
329         natural_width host_gs_base;
330         natural_width host_tr_base;
331         natural_width host_gdtr_base;
332         natural_width host_idtr_base;
333         natural_width host_ia32_sysenter_esp;
334         natural_width host_ia32_sysenter_eip;
335         natural_width host_rsp;
336         natural_width host_rip;
337         natural_width paddingl[8]; /* room for future expansion */
338         u32 pin_based_vm_exec_control;
339         u32 cpu_based_vm_exec_control;
340         u32 exception_bitmap;
341         u32 page_fault_error_code_mask;
342         u32 page_fault_error_code_match;
343         u32 cr3_target_count;
344         u32 vm_exit_controls;
345         u32 vm_exit_msr_store_count;
346         u32 vm_exit_msr_load_count;
347         u32 vm_entry_controls;
348         u32 vm_entry_msr_load_count;
349         u32 vm_entry_intr_info_field;
350         u32 vm_entry_exception_error_code;
351         u32 vm_entry_instruction_len;
352         u32 tpr_threshold;
353         u32 secondary_vm_exec_control;
354         u32 vm_instruction_error;
355         u32 vm_exit_reason;
356         u32 vm_exit_intr_info;
357         u32 vm_exit_intr_error_code;
358         u32 idt_vectoring_info_field;
359         u32 idt_vectoring_error_code;
360         u32 vm_exit_instruction_len;
361         u32 vmx_instruction_info;
362         u32 guest_es_limit;
363         u32 guest_cs_limit;
364         u32 guest_ss_limit;
365         u32 guest_ds_limit;
366         u32 guest_fs_limit;
367         u32 guest_gs_limit;
368         u32 guest_ldtr_limit;
369         u32 guest_tr_limit;
370         u32 guest_gdtr_limit;
371         u32 guest_idtr_limit;
372         u32 guest_es_ar_bytes;
373         u32 guest_cs_ar_bytes;
374         u32 guest_ss_ar_bytes;
375         u32 guest_ds_ar_bytes;
376         u32 guest_fs_ar_bytes;
377         u32 guest_gs_ar_bytes;
378         u32 guest_ldtr_ar_bytes;
379         u32 guest_tr_ar_bytes;
380         u32 guest_interruptibility_info;
381         u32 guest_activity_state;
382         u32 guest_sysenter_cs;
383         u32 host_ia32_sysenter_cs;
384         u32 vmx_preemption_timer_value;
385         u32 padding32[7]; /* room for future expansion */
386         u16 virtual_processor_id;
387         u16 posted_intr_nv;
388         u16 guest_es_selector;
389         u16 guest_cs_selector;
390         u16 guest_ss_selector;
391         u16 guest_ds_selector;
392         u16 guest_fs_selector;
393         u16 guest_gs_selector;
394         u16 guest_ldtr_selector;
395         u16 guest_tr_selector;
396         u16 guest_intr_status;
397         u16 guest_pml_index;
398         u16 host_es_selector;
399         u16 host_cs_selector;
400         u16 host_ss_selector;
401         u16 host_ds_selector;
402         u16 host_fs_selector;
403         u16 host_gs_selector;
404         u16 host_tr_selector;
405 };
406
407 /*
408  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
409  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
410  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
411  */
412 #define VMCS12_REVISION 0x11e57ed0
413
414 /*
415  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
416  * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
417  * the current implementation, 4K is reserved to avoid future complications.
418  */
419 #define VMCS12_SIZE 0x1000
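
/*
 * Illustrative sketch, not part of the driver: the size constraint described
 * above can be expressed as a compile-time assertion.  The wrapper function
 * is hypothetical and exists only to host the check.
 */
static inline void example_check_vmcs12_size(void)
{
	BUILD_BUG_ON(sizeof(struct vmcs12) > VMCS12_SIZE);
}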
420
421 /*
422  * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
423  * supported VMCS12 field encoding.
424  */
425 #define VMCS12_MAX_FIELD_INDEX 0x17
426
427 /*
428  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
429  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
430  */
431 struct nested_vmx {
432         /* Has the level1 guest done vmxon? */
433         bool vmxon;
434         gpa_t vmxon_ptr;
435         bool pml_full;
436
437         /* The guest-physical address of the current VMCS L1 keeps for L2 */
438         gpa_t current_vmptr;
439         /*
440          * Cache of the guest's VMCS, existing outside of guest memory.
441          * Loaded from guest memory during VMPTRLD. Flushed to guest
442          * memory during VMCLEAR and VMPTRLD.
443          */
444         struct vmcs12 *cached_vmcs12;
445         /*
446          * Indicates if the shadow vmcs must be updated with the
447          * data held by vmcs12.
448          */
449         bool sync_shadow_vmcs;
450         bool dirty_vmcs12;
451
452         bool change_vmcs01_virtual_x2apic_mode;
453         /* L2 must run next, and mustn't decide to exit to L1. */
454         bool nested_run_pending;
455
456         struct loaded_vmcs vmcs02;
457
458         /*
459          * Guest pages referred to in the vmcs02 with host-physical
460          * pointers, so we must keep them pinned while L2 runs.
461          */
462         struct page *apic_access_page;
463         struct page *virtual_apic_page;
464         struct page *pi_desc_page;
465         struct pi_desc *pi_desc;
466         bool pi_pending;
467         u16 posted_intr_nv;
468
469         struct hrtimer preemption_timer;
470         bool preemption_timer_expired;
471
472         /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
473         u64 vmcs01_debugctl;
474
475         u16 vpid02;
476         u16 last_vpid;
477
478         /*
479          * We only store the "true" versions of the VMX capability MSRs. We
480          * generate the "non-true" versions by setting the must-be-1 bits
481          * according to the SDM.
482          */
483         u32 nested_vmx_procbased_ctls_low;
484         u32 nested_vmx_procbased_ctls_high;
485         u32 nested_vmx_secondary_ctls_low;
486         u32 nested_vmx_secondary_ctls_high;
487         u32 nested_vmx_pinbased_ctls_low;
488         u32 nested_vmx_pinbased_ctls_high;
489         u32 nested_vmx_exit_ctls_low;
490         u32 nested_vmx_exit_ctls_high;
491         u32 nested_vmx_entry_ctls_low;
492         u32 nested_vmx_entry_ctls_high;
493         u32 nested_vmx_misc_low;
494         u32 nested_vmx_misc_high;
495         u32 nested_vmx_ept_caps;
496         u32 nested_vmx_vpid_caps;
497         u64 nested_vmx_basic;
498         u64 nested_vmx_cr0_fixed0;
499         u64 nested_vmx_cr0_fixed1;
500         u64 nested_vmx_cr4_fixed0;
501         u64 nested_vmx_cr4_fixed1;
502         u64 nested_vmx_vmcs_enum;
503         u64 nested_vmx_vmfunc_controls;
504
505         /* SMM related state */
506         struct {
507                 /* in VMX operation on SMM entry? */
508                 bool vmxon;
509                 /* in guest mode on SMM entry? */
510                 bool guest_mode;
511         } smm;
512 };
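
/*
 * Illustrative sketch, not part of the driver: deriving the "non-true"
 * MSR_IA32_VMX_PROCBASED_CTLS value from the cached "true" halves above means
 * forcing the SDM's default-1 bits back on in the allowed-0 (low) word.  The
 * helper name is hypothetical.
 */
static inline u64 example_non_true_procbased_ctls(u32 true_low, u32 true_high)
{
	return ((u64)true_high << 32) |
	       (true_low | CPU_BASED_ALWAYS_ON_WITHOUT_TRUE_MSR);
}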
513
514 #define POSTED_INTR_ON  0
515 #define POSTED_INTR_SN  1
516
517 /* Posted-Interrupt Descriptor */
518 struct pi_desc {
519         u32 pir[8];     /* Posted interrupt requested */
520         union {
521                 struct {
522                                 /* bit 256 - Outstanding Notification */
523                         u16     on      : 1,
524                                 /* bit 257 - Suppress Notification */
525                                 sn      : 1,
526                                 /* bit 271:258 - Reserved */
527                                 rsvd_1  : 14;
528                                 /* bit 279:272 - Notification Vector */
529                         u8      nv;
530                                 /* bit 287:280 - Reserved */
531                         u8      rsvd_2;
532                                 /* bit 319:288 - Notification Destination */
533                         u32     ndst;
534                 };
535                 u64 control;
536         };
537         u32 rsvd[6];
538 } __aligned(64);
539
540 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
541 {
542         return test_and_set_bit(POSTED_INTR_ON,
543                         (unsigned long *)&pi_desc->control);
544 }
545
546 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
547 {
548         return test_and_clear_bit(POSTED_INTR_ON,
549                         (unsigned long *)&pi_desc->control);
550 }
551
552 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
553 {
554         return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
555 }
556
557 static inline void pi_clear_sn(struct pi_desc *pi_desc)
558 {
559         return clear_bit(POSTED_INTR_SN,
560                         (unsigned long *)&pi_desc->control);
561 }
562
563 static inline void pi_set_sn(struct pi_desc *pi_desc)
564 {
565         return set_bit(POSTED_INTR_SN,
566                         (unsigned long *)&pi_desc->control);
567 }
568
569 static inline void pi_clear_on(struct pi_desc *pi_desc)
570 {
571         clear_bit(POSTED_INTR_ON,
572                   (unsigned long *)&pi_desc->control);
573 }
574
575 static inline int pi_test_on(struct pi_desc *pi_desc)
576 {
577         return test_bit(POSTED_INTR_ON,
578                         (unsigned long *)&pi_desc->control);
579 }
580
581 static inline int pi_test_sn(struct pi_desc *pi_desc)
582 {
583         return test_bit(POSTED_INTR_SN,
584                         (unsigned long *)&pi_desc->control);
585 }
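
/*
 * Illustrative sketch, not part of the driver: posting a vector into the
 * descriptor is a two-step sequence -- set the bit in PIR, then set ON -- and
 * only the 0->1 transition of ON warrants sending a notification IPI.  The
 * function name is hypothetical.
 */
static inline bool example_needs_notification(struct pi_desc *pi_desc,
					      int vector)
{
	if (pi_test_and_set_pir(vector, pi_desc))
		return false;	/* vector was already pending */

	/* An IPI is only useful when ON flips from 0 to 1. */
	return !pi_test_and_set_on(pi_desc);
}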
586
587 struct vcpu_vmx {
588         struct kvm_vcpu       vcpu;
589         unsigned long         host_rsp;
590         u8                    fail;
591         u8                    msr_bitmap_mode;
592         u32                   exit_intr_info;
593         u32                   idt_vectoring_info;
594         ulong                 rflags;
595         struct shared_msr_entry *guest_msrs;
596         int                   nmsrs;
597         int                   save_nmsrs;
598         unsigned long         host_idt_base;
599 #ifdef CONFIG_X86_64
600         u64                   msr_host_kernel_gs_base;
601         u64                   msr_guest_kernel_gs_base;
602 #endif
603
604         u64                   arch_capabilities;
605         u64                   spec_ctrl;
606
607         u32 vm_entry_controls_shadow;
608         u32 vm_exit_controls_shadow;
609         u32 secondary_exec_control;
610
611         /*
612          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
613          * non-nested (L1) guest, it always points to vmcs01. For a nested
614          * guest (L2), it points to a different VMCS.
615          */
616         struct loaded_vmcs    vmcs01;
617         struct loaded_vmcs   *loaded_vmcs;
618         bool                  __launched; /* temporary, used in vmx_vcpu_run */
619         struct msr_autoload {
620                 unsigned nr;
621                 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
622                 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
623         } msr_autoload;
624         struct {
625                 int           loaded;
626                 u16           fs_sel, gs_sel, ldt_sel;
627 #ifdef CONFIG_X86_64
628                 u16           ds_sel, es_sel;
629 #endif
630                 int           gs_ldt_reload_needed;
631                 int           fs_reload_needed;
632                 u64           msr_host_bndcfgs;
633         } host_state;
634         struct {
635                 int vm86_active;
636                 ulong save_rflags;
637                 struct kvm_segment segs[8];
638         } rmode;
639         struct {
640                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
641                 struct kvm_save_segment {
642                         u16 selector;
643                         unsigned long base;
644                         u32 limit;
645                         u32 ar;
646                 } seg[8];
647         } segment_cache;
648         int vpid;
649         bool emulation_required;
650
651         u32 exit_reason;
652
653         /* Posted interrupt descriptor */
654         struct pi_desc pi_desc;
655
656         /* Support for a guest hypervisor (nested VMX) */
657         struct nested_vmx nested;
658
659         /* Dynamic PLE window. */
660         int ple_window;
661         bool ple_window_dirty;
662
663         /* Support for PML */
664 #define PML_ENTITY_NUM          512
665         struct page *pml_pg;
666
667         /* apic deadline value in host tsc */
668         u64 hv_deadline_tsc;
669
670         u64 current_tsc_ratio;
671
672         u32 host_pkru;
673
674         unsigned long host_debugctlmsr;
675
676         /*
677          * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
678          * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
679          * in msr_ia32_feature_control_valid_bits.
680          */
681         u64 msr_ia32_feature_control;
682         u64 msr_ia32_feature_control_valid_bits;
683 };
684
685 enum segment_cache_field {
686         SEG_FIELD_SEL = 0,
687         SEG_FIELD_BASE = 1,
688         SEG_FIELD_LIMIT = 2,
689         SEG_FIELD_AR = 3,
690
691         SEG_FIELD_NR = 4
692 };
693
694 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
695 {
696         return container_of(vcpu, struct vcpu_vmx, vcpu);
697 }
698
699 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
700 {
701         return &(to_vmx(vcpu)->pi_desc);
702 }
703
704 #define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
705 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
706 #define FIELD(number, name)     [ROL16(number, 6)] = VMCS12_OFFSET(name)
707 #define FIELD64(number, name)                                           \
708         FIELD(number, name),                                            \
709         [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
710
711
712 static u16 shadow_read_only_fields[] = {
713 #define SHADOW_FIELD_RO(x) x,
714 #include "vmx_shadow_fields.h"
715 };
716 static int max_shadow_read_only_fields =
717         ARRAY_SIZE(shadow_read_only_fields);
718
719 static u16 shadow_read_write_fields[] = {
720 #define SHADOW_FIELD_RW(x) x,
721 #include "vmx_shadow_fields.h"
722 };
723 static int max_shadow_read_write_fields =
724         ARRAY_SIZE(shadow_read_write_fields);
725
726 static const unsigned short vmcs_field_to_offset_table[] = {
727         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
728         FIELD(POSTED_INTR_NV, posted_intr_nv),
729         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
730         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
731         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
732         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
733         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
734         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
735         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
736         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
737         FIELD(GUEST_INTR_STATUS, guest_intr_status),
738         FIELD(GUEST_PML_INDEX, guest_pml_index),
739         FIELD(HOST_ES_SELECTOR, host_es_selector),
740         FIELD(HOST_CS_SELECTOR, host_cs_selector),
741         FIELD(HOST_SS_SELECTOR, host_ss_selector),
742         FIELD(HOST_DS_SELECTOR, host_ds_selector),
743         FIELD(HOST_FS_SELECTOR, host_fs_selector),
744         FIELD(HOST_GS_SELECTOR, host_gs_selector),
745         FIELD(HOST_TR_SELECTOR, host_tr_selector),
746         FIELD64(IO_BITMAP_A, io_bitmap_a),
747         FIELD64(IO_BITMAP_B, io_bitmap_b),
748         FIELD64(MSR_BITMAP, msr_bitmap),
749         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
750         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
751         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
752         FIELD64(TSC_OFFSET, tsc_offset),
753         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
754         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
755         FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
756         FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
757         FIELD64(EPT_POINTER, ept_pointer),
758         FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
759         FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
760         FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
761         FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
762         FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
763         FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
764         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
765         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
766         FIELD64(PML_ADDRESS, pml_address),
767         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
768         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
769         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
770         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
771         FIELD64(GUEST_PDPTR0, guest_pdptr0),
772         FIELD64(GUEST_PDPTR1, guest_pdptr1),
773         FIELD64(GUEST_PDPTR2, guest_pdptr2),
774         FIELD64(GUEST_PDPTR3, guest_pdptr3),
775         FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
776         FIELD64(HOST_IA32_PAT, host_ia32_pat),
777         FIELD64(HOST_IA32_EFER, host_ia32_efer),
778         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
779         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
780         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
781         FIELD(EXCEPTION_BITMAP, exception_bitmap),
782         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
783         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
784         FIELD(CR3_TARGET_COUNT, cr3_target_count),
785         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
786         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
787         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
788         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
789         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
790         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
791         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
792         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
793         FIELD(TPR_THRESHOLD, tpr_threshold),
794         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
795         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
796         FIELD(VM_EXIT_REASON, vm_exit_reason),
797         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
798         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
799         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
800         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
801         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
802         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
803         FIELD(GUEST_ES_LIMIT, guest_es_limit),
804         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
805         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
806         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
807         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
808         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
809         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
810         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
811         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
812         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
813         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
814         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
815         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
816         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
817         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
818         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
819         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
820         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
821         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
822         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
823         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
824         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
825         FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
826         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
827         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
828         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
829         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
830         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
831         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
832         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
833         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
834         FIELD(EXIT_QUALIFICATION, exit_qualification),
835         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
836         FIELD(GUEST_CR0, guest_cr0),
837         FIELD(GUEST_CR3, guest_cr3),
838         FIELD(GUEST_CR4, guest_cr4),
839         FIELD(GUEST_ES_BASE, guest_es_base),
840         FIELD(GUEST_CS_BASE, guest_cs_base),
841         FIELD(GUEST_SS_BASE, guest_ss_base),
842         FIELD(GUEST_DS_BASE, guest_ds_base),
843         FIELD(GUEST_FS_BASE, guest_fs_base),
844         FIELD(GUEST_GS_BASE, guest_gs_base),
845         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
846         FIELD(GUEST_TR_BASE, guest_tr_base),
847         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
848         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
849         FIELD(GUEST_DR7, guest_dr7),
850         FIELD(GUEST_RSP, guest_rsp),
851         FIELD(GUEST_RIP, guest_rip),
852         FIELD(GUEST_RFLAGS, guest_rflags),
853         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
854         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
855         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
856         FIELD(HOST_CR0, host_cr0),
857         FIELD(HOST_CR3, host_cr3),
858         FIELD(HOST_CR4, host_cr4),
859         FIELD(HOST_FS_BASE, host_fs_base),
860         FIELD(HOST_GS_BASE, host_gs_base),
861         FIELD(HOST_TR_BASE, host_tr_base),
862         FIELD(HOST_GDTR_BASE, host_gdtr_base),
863         FIELD(HOST_IDTR_BASE, host_idtr_base),
864         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
865         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
866         FIELD(HOST_RSP, host_rsp),
867         FIELD(HOST_RIP, host_rip),
868 };
869
870 static inline short vmcs_field_to_offset(unsigned long field)
871 {
872         const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
873         unsigned short offset;
874         unsigned index;
875
876         if (field >> 15)
877                 return -ENOENT;
878
879         index = ROL16(field, 6);
880         if (index >= size)
881                 return -ENOENT;
882
883         index = array_index_nospec(index, size);
884         offset = vmcs_field_to_offset_table[index];
885         if (offset == 0)
886                 return -ENOENT;
887         return offset;
888 }
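
/*
 * Illustrative sketch, not part of the driver: reading a 16-bit vmcs12 field
 * through the offset table amounts to pointer arithmetic on the cached
 * structure.  Width handling and error reporting are simplified; the helper
 * name is hypothetical.
 */
static inline u16 example_read_vmcs12_u16(struct vmcs12 *vmcs12,
					  unsigned long field)
{
	short offset = vmcs_field_to_offset(field);

	if (offset < 0)
		return 0;
	return *(u16 *)((char *)vmcs12 + offset);
}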
889
890 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
891 {
892         return to_vmx(vcpu)->nested.cached_vmcs12;
893 }
894
895 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
896 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
897 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
898 static bool vmx_xsaves_supported(void);
899 static void vmx_set_segment(struct kvm_vcpu *vcpu,
900                             struct kvm_segment *var, int seg);
901 static void vmx_get_segment(struct kvm_vcpu *vcpu,
902                             struct kvm_segment *var, int seg);
903 static bool guest_state_valid(struct kvm_vcpu *vcpu);
904 static u32 vmx_segment_access_rights(struct kvm_segment *var);
905 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
906 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
907 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
908 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
909                                             u16 error_code);
910 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
911 static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
912                                                           u32 msr, int type);
913
914 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
915 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
916 /*
917  * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is needed
918  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
919  */
920 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
921
922 /*
923  * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
924  * can find which vCPU should be woken up.
925  */
926 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
927 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
928
929 enum {
930         VMX_VMREAD_BITMAP,
931         VMX_VMWRITE_BITMAP,
932         VMX_BITMAP_NR
933 };
934
935 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
936
937 #define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
938 #define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
939
940 static bool cpu_has_load_ia32_efer;
941 static bool cpu_has_load_perf_global_ctrl;
942
943 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
944 static DEFINE_SPINLOCK(vmx_vpid_lock);
945
946 static struct vmcs_config {
947         int size;
948         int order;
949         u32 basic_cap;
950         u32 revision_id;
951         u32 pin_based_exec_ctrl;
952         u32 cpu_based_exec_ctrl;
953         u32 cpu_based_2nd_exec_ctrl;
954         u32 vmexit_ctrl;
955         u32 vmentry_ctrl;
956 } vmcs_config;
957
958 static struct vmx_capability {
959         u32 ept;
960         u32 vpid;
961 } vmx_capability;
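
/*
 * Illustrative sketch, not part of the driver: carving a VMCS region out of
 * vmcs_config.order pages and stamping it with the hardware revision id, as
 * required before the region may be handed to VMPTRLD.  The helper name is
 * hypothetical.
 */
static inline struct vmcs *example_alloc_vmcs_region(int node)
{
	struct page *pages;
	struct vmcs *vmcs;

	pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id;
	return vmcs;
}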
962
963 #define VMX_SEGMENT_FIELD(seg)                                  \
964         [VCPU_SREG_##seg] = {                                   \
965                 .selector = GUEST_##seg##_SELECTOR,             \
966                 .base = GUEST_##seg##_BASE,                     \
967                 .limit = GUEST_##seg##_LIMIT,                   \
968                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
969         }
970
971 static const struct kvm_vmx_segment_field {
972         unsigned selector;
973         unsigned base;
974         unsigned limit;
975         unsigned ar_bytes;
976 } kvm_vmx_segment_fields[] = {
977         VMX_SEGMENT_FIELD(CS),
978         VMX_SEGMENT_FIELD(DS),
979         VMX_SEGMENT_FIELD(ES),
980         VMX_SEGMENT_FIELD(FS),
981         VMX_SEGMENT_FIELD(GS),
982         VMX_SEGMENT_FIELD(SS),
983         VMX_SEGMENT_FIELD(TR),
984         VMX_SEGMENT_FIELD(LDTR),
985 };
986
987 static u64 host_efer;
988
989 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
990
991 /*
992  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
993  * away by decrementing the array size.
994  */
995 static const u32 vmx_msr_index[] = {
996 #ifdef CONFIG_X86_64
997         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
998 #endif
999         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
1000 };
1001
1002 static inline bool is_exception_n(u32 intr_info, u8 vector)
1003 {
1004         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1005                              INTR_INFO_VALID_MASK)) ==
1006                 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1007 }
1008
1009 static inline bool is_debug(u32 intr_info)
1010 {
1011         return is_exception_n(intr_info, DB_VECTOR);
1012 }
1013
1014 static inline bool is_breakpoint(u32 intr_info)
1015 {
1016         return is_exception_n(intr_info, BP_VECTOR);
1017 }
1018
1019 static inline bool is_page_fault(u32 intr_info)
1020 {
1021         return is_exception_n(intr_info, PF_VECTOR);
1022 }
1023
1024 static inline bool is_no_device(u32 intr_info)
1025 {
1026         return is_exception_n(intr_info, NM_VECTOR);
1027 }
1028
1029 static inline bool is_invalid_opcode(u32 intr_info)
1030 {
1031         return is_exception_n(intr_info, UD_VECTOR);
1032 }
1033
1034 static inline bool is_external_interrupt(u32 intr_info)
1035 {
1036         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1037                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1038 }
1039
1040 static inline bool is_machine_check(u32 intr_info)
1041 {
1042         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1043                              INTR_INFO_VALID_MASK)) ==
1044                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1045 }
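
/*
 * Illustrative sketch, not part of the driver: a guest #PF shows up in
 * VM_EXIT_INTR_INFO as a hard exception with the PF vector and the valid bit
 * set, i.e. the pattern is_page_fault() matches.  The helper name is
 * hypothetical.
 */
static inline u32 example_page_fault_intr_info(void)
{
	return INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK;
}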
1046
1047 static inline bool cpu_has_vmx_msr_bitmap(void)
1048 {
1049         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
1050 }
1051
1052 static inline bool cpu_has_vmx_tpr_shadow(void)
1053 {
1054         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
1055 }
1056
1057 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
1058 {
1059         return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
1060 }
1061
1062 static inline bool cpu_has_secondary_exec_ctrls(void)
1063 {
1064         return vmcs_config.cpu_based_exec_ctrl &
1065                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1066 }
1067
1068 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
1069 {
1070         return vmcs_config.cpu_based_2nd_exec_ctrl &
1071                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1072 }
1073
1074 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1075 {
1076         return vmcs_config.cpu_based_2nd_exec_ctrl &
1077                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1078 }
1079
1080 static inline bool cpu_has_vmx_apic_register_virt(void)
1081 {
1082         return vmcs_config.cpu_based_2nd_exec_ctrl &
1083                 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1084 }
1085
1086 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1087 {
1088         return vmcs_config.cpu_based_2nd_exec_ctrl &
1089                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1090 }
1091
1092 /*
1093  * Comment format: document - errata name - stepping - processor name.
1094  * Taken from
1095  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1096  */
1097 static u32 vmx_preemption_cpu_tfms[] = {
1098 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
1099 0x000206E6,
1100 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
1101 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1102 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1103 0x00020652,
1104 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1105 0x00020655,
1106 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
1107 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
1108 /*
1109  * 320767.pdf - AAP86  - B1 -
1110  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1111  */
1112 0x000106E5,
1113 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1114 0x000106A0,
1115 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1116 0x000106A1,
1117 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1118 0x000106A4,
1119  /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1120  /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1121  /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1122 0x000106A5,
1123 };
1124
1125 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1126 {
1127         u32 eax = cpuid_eax(0x00000001), i;
1128
1129         /* Clear the reserved bits */
1130         eax &= ~(0x3U << 14 | 0xfU << 28);
1131         for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
1132                 if (eax == vmx_preemption_cpu_tfms[i])
1133                         return true;
1134
1135         return false;
1136 }
1137
1138 static inline bool cpu_has_vmx_preemption_timer(void)
1139 {
1140         return vmcs_config.pin_based_exec_ctrl &
1141                 PIN_BASED_VMX_PREEMPTION_TIMER;
1142 }
1143
1144 static inline bool cpu_has_vmx_posted_intr(void)
1145 {
1146         return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1147                 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
1148 }
1149
1150 static inline bool cpu_has_vmx_apicv(void)
1151 {
1152         return cpu_has_vmx_apic_register_virt() &&
1153                 cpu_has_vmx_virtual_intr_delivery() &&
1154                 cpu_has_vmx_posted_intr();
1155 }
1156
1157 static inline bool cpu_has_vmx_flexpriority(void)
1158 {
1159         return cpu_has_vmx_tpr_shadow() &&
1160                 cpu_has_vmx_virtualize_apic_accesses();
1161 }
1162
1163 static inline bool cpu_has_vmx_ept_execute_only(void)
1164 {
1165         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
1166 }
1167
1168 static inline bool cpu_has_vmx_ept_2m_page(void)
1169 {
1170         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
1171 }
1172
1173 static inline bool cpu_has_vmx_ept_1g_page(void)
1174 {
1175         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
1176 }
1177
1178 static inline bool cpu_has_vmx_ept_4levels(void)
1179 {
1180         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1181 }
1182
1183 static inline bool cpu_has_vmx_ept_mt_wb(void)
1184 {
1185         return vmx_capability.ept & VMX_EPTP_WB_BIT;
1186 }
1187
1188 static inline bool cpu_has_vmx_ept_5levels(void)
1189 {
1190         return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
1191 }
1192
1193 static inline bool cpu_has_vmx_ept_ad_bits(void)
1194 {
1195         return vmx_capability.ept & VMX_EPT_AD_BIT;
1196 }
1197
1198 static inline bool cpu_has_vmx_invept_context(void)
1199 {
1200         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
1201 }
1202
1203 static inline bool cpu_has_vmx_invept_global(void)
1204 {
1205         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
1206 }
1207
1208 static inline bool cpu_has_vmx_invvpid_single(void)
1209 {
1210         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1211 }
1212
1213 static inline bool cpu_has_vmx_invvpid_global(void)
1214 {
1215         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1216 }
1217
1218 static inline bool cpu_has_vmx_invvpid(void)
1219 {
1220         return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1221 }
1222
1223 static inline bool cpu_has_vmx_ept(void)
1224 {
1225         return vmcs_config.cpu_based_2nd_exec_ctrl &
1226                 SECONDARY_EXEC_ENABLE_EPT;
1227 }
1228
1229 static inline bool cpu_has_vmx_unrestricted_guest(void)
1230 {
1231         return vmcs_config.cpu_based_2nd_exec_ctrl &
1232                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1233 }
1234
1235 static inline bool cpu_has_vmx_ple(void)
1236 {
1237         return vmcs_config.cpu_based_2nd_exec_ctrl &
1238                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1239 }
1240
1241 static inline bool cpu_has_vmx_basic_inout(void)
1242 {
1243         return  (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
1244 }
1245
1246 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
1247 {
1248         return flexpriority_enabled && lapic_in_kernel(vcpu);
1249 }
1250
1251 static inline bool cpu_has_vmx_vpid(void)
1252 {
1253         return vmcs_config.cpu_based_2nd_exec_ctrl &
1254                 SECONDARY_EXEC_ENABLE_VPID;
1255 }
1256
1257 static inline bool cpu_has_vmx_rdtscp(void)
1258 {
1259         return vmcs_config.cpu_based_2nd_exec_ctrl &
1260                 SECONDARY_EXEC_RDTSCP;
1261 }
1262
1263 static inline bool cpu_has_vmx_invpcid(void)
1264 {
1265         return vmcs_config.cpu_based_2nd_exec_ctrl &
1266                 SECONDARY_EXEC_ENABLE_INVPCID;
1267 }
1268
1269 static inline bool cpu_has_virtual_nmis(void)
1270 {
1271         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1272 }
1273
1274 static inline bool cpu_has_vmx_wbinvd_exit(void)
1275 {
1276         return vmcs_config.cpu_based_2nd_exec_ctrl &
1277                 SECONDARY_EXEC_WBINVD_EXITING;
1278 }
1279
1280 static inline bool cpu_has_vmx_shadow_vmcs(void)
1281 {
1282         u64 vmx_msr;
1283         rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1284         /* check if the cpu supports writing r/o exit information fields */
1285         if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1286                 return false;
1287
1288         return vmcs_config.cpu_based_2nd_exec_ctrl &
1289                 SECONDARY_EXEC_SHADOW_VMCS;
1290 }
1291
1292 static inline bool cpu_has_vmx_pml(void)
1293 {
1294         return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1295 }
1296
1297 static inline bool cpu_has_vmx_tsc_scaling(void)
1298 {
1299         return vmcs_config.cpu_based_2nd_exec_ctrl &
1300                 SECONDARY_EXEC_TSC_SCALING;
1301 }
1302
1303 static inline bool cpu_has_vmx_vmfunc(void)
1304 {
1305         return vmcs_config.cpu_based_2nd_exec_ctrl &
1306                 SECONDARY_EXEC_ENABLE_VMFUNC;
1307 }
1308
1309 static inline bool report_flexpriority(void)
1310 {
1311         return flexpriority_enabled;
1312 }
1313
1314 static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
1315 {
1316         return vmx_misc_cr3_count(to_vmx(vcpu)->nested.nested_vmx_misc_low);
1317 }
1318
1319 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1320 {
1321         return vmcs12->cpu_based_vm_exec_control & bit;
1322 }
1323
1324 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1325 {
1326         return (vmcs12->cpu_based_vm_exec_control &
1327                         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1328                 (vmcs12->secondary_vm_exec_control & bit);
1329 }
1330
1331 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1332 {
1333         return vmcs12->pin_based_vm_exec_control &
1334                 PIN_BASED_VMX_PREEMPTION_TIMER;
1335 }
1336
1337 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1338 {
1339         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1340 }
1341
1342 static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1343 {
1344         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
1345 }
1346
1347 static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
1348 {
1349         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
1350 }
1351
1352 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
1353 {
1354         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
1355 }
1356
1357 static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
1358 {
1359         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
1360 }
1361
1362 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
1363 {
1364         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
1365 }
1366
1367 static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
1368 {
1369         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
1370 }
1371
1372 static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
1373 {
1374         return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
1375 }
1376
1377 static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
1378 {
1379         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
1380 }
1381
1382 static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
1383 {
1384         return nested_cpu_has_vmfunc(vmcs12) &&
1385                 (vmcs12->vm_function_control &
1386                  VMX_VMFUNC_EPTP_SWITCHING);
1387 }
1388
1389 static inline bool is_nmi(u32 intr_info)
1390 {
1391         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1392                 == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
1393 }
1394
1395 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1396                               u32 exit_intr_info,
1397                               unsigned long exit_qualification);
1398 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1399                         struct vmcs12 *vmcs12,
1400                         u32 reason, unsigned long qualification);
1401
1402 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
1403 {
1404         int i;
1405
1406         for (i = 0; i < vmx->nmsrs; ++i)
1407                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
1408                         return i;
1409         return -1;
1410 }
1411
1412 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1413 {
1414         struct {
1415                 u64 vpid : 16;
1416                 u64 rsvd : 48;
1417                 u64 gva;
1418         } operand = { vpid, 0, gva };
1419
1420         asm volatile (__ex(ASM_VMX_INVVPID)
1421                       /* CF==1 or ZF==1 --> rc = -1 */
1422                       "; ja 1f ; ud2 ; 1:"
1423                       : : "a"(&operand), "c"(ext) : "cc", "memory");
1424 }
1425
1426 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1427 {
1428         struct {
1429                 u64 eptp, gpa;
1430         } operand = {eptp, gpa};
1431
1432         asm volatile (__ex(ASM_VMX_INVEPT)
1433                         /* CF==1 or ZF==1 --> rc = -1 */
1434                         "; ja 1f ; ud2 ; 1:\n"
1435                         : : "a" (&operand), "c" (ext) : "cc", "memory");
1436 }
1437
1438 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
1439 {
1440         int i;
1441
1442         i = __find_msr_index(vmx, msr);
1443         if (i >= 0)
1444                 return &vmx->guest_msrs[i];
1445         return NULL;
1446 }
1447
1448 static void vmcs_clear(struct vmcs *vmcs)
1449 {
1450         u64 phys_addr = __pa(vmcs);
1451         u8 error;
1452
1453         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
1454                       : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1455                       : "cc", "memory");
1456         if (error)
1457                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
1458                        vmcs, phys_addr);
1459 }
1460
1461 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1462 {
1463         vmcs_clear(loaded_vmcs->vmcs);
1464         if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
1465                 vmcs_clear(loaded_vmcs->shadow_vmcs);
1466         loaded_vmcs->cpu = -1;
1467         loaded_vmcs->launched = 0;
1468 }
1469
1470 static void vmcs_load(struct vmcs *vmcs)
1471 {
1472         u64 phys_addr = __pa(vmcs);
1473         u8 error;
1474
1475         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
1476                         : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1477                         : "cc", "memory");
1478         if (error)
1479                 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
1480                        vmcs, phys_addr);
1481 }
1482
1483 #ifdef CONFIG_KEXEC_CORE
1484 /*
1485  * This bitmap indicates, per CPU, whether the crash-time
1486  * vmclear operation is enabled. All CPUs are disabled by
1487  * default.
1488  */
1489 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
1490
1491 static inline void crash_enable_local_vmclear(int cpu)
1492 {
1493         cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
1494 }
1495
1496 static inline void crash_disable_local_vmclear(int cpu)
1497 {
1498         cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
1499 }
1500
1501 static inline int crash_local_vmclear_enabled(int cpu)
1502 {
1503         return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
1504 }
1505
1506 static void crash_vmclear_local_loaded_vmcss(void)
1507 {
1508         int cpu = raw_smp_processor_id();
1509         struct loaded_vmcs *v;
1510
1511         if (!crash_local_vmclear_enabled(cpu))
1512                 return;
1513
1514         list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
1515                             loaded_vmcss_on_cpu_link)
1516                 vmcs_clear(v->vmcs);
1517 }
1518 #else
1519 static inline void crash_enable_local_vmclear(int cpu) { }
1520 static inline void crash_disable_local_vmclear(int cpu) { }
1521 #endif /* CONFIG_KEXEC_CORE */
1522
1523 static void __loaded_vmcs_clear(void *arg)
1524 {
1525         struct loaded_vmcs *loaded_vmcs = arg;
1526         int cpu = raw_smp_processor_id();
1527
1528         if (loaded_vmcs->cpu != cpu)
1529                 return; /* vcpu migration can race with cpu offline */
1530         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
1531                 per_cpu(current_vmcs, cpu) = NULL;
1532         crash_disable_local_vmclear(cpu);
1533         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
1534
1535         /*
1536          * Ensure the removal from the loaded_vmcss_on_cpu list is visible
1537          * before loaded_vmcs->cpu is set to -1 in loaded_vmcs_init().
1538          * Otherwise another cpu could see cpu == -1 first and add the
1539          * vmcs back onto its per-cpu list before it is deleted here.
1540          */
1541         smp_wmb();
1542
1543         loaded_vmcs_init(loaded_vmcs);
1544         crash_enable_local_vmclear(cpu);
1545 }
1546
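/*
 * Clear a loaded VMCS on whichever physical cpu last ran it.  VMCLEAR must
 * execute on that cpu, so an IPI (smp_call_function_single) is used to run
 * __loaded_vmcs_clear there; cpu == -1 means the VMCS is not resident on
 * any cpu and nothing needs to be done.
 */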
1547 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1548 {
1549         int cpu = loaded_vmcs->cpu;
1550
1551         if (cpu != -1)
1552                 smp_call_function_single(cpu,
1553                          __loaded_vmcs_clear, loaded_vmcs, 1);
1554 }
1555
1556 static inline void vpid_sync_vcpu_single(int vpid)
1557 {
1558         if (vpid == 0)
1559                 return;
1560
1561         if (cpu_has_vmx_invvpid_single())
1562                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
1563 }
1564
1565 static inline void vpid_sync_vcpu_global(void)
1566 {
1567         if (cpu_has_vmx_invvpid_global())
1568                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
1569 }
1570
1571 static inline void vpid_sync_context(int vpid)
1572 {
1573         if (cpu_has_vmx_invvpid_single())
1574                 vpid_sync_vcpu_single(vpid);
1575         else
1576                 vpid_sync_vcpu_global();
1577 }
1578
1579 static inline void ept_sync_global(void)
1580 {
1581         __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
1582 }
1583
1584 static inline void ept_sync_context(u64 eptp)
1585 {
1586         if (cpu_has_vmx_invept_context())
1587                 __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
1588         else
1589                 ept_sync_global();
1590 }
1591
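/*
 * VMCS field encodings (Intel SDM vol. 3, appendix B): bits 14:13 of the
 * encoding give the field width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit,
 * 3 = natural width), and bit 0 selects the high half of a 64-bit field.
 * The vmcs_check* helpers below use the 0x6000/0x6001 masks to catch, at
 * build time, accessors whose width does not match a constant field.  For
 * example, GUEST_ES_SELECTOR (encoding 0x0800) has bits 14:13 == 0 and
 * must be accessed with vmcs_read16()/vmcs_write16().
 */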
1592 static __always_inline void vmcs_check16(unsigned long field)
1593 {
1594         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1595                          "16-bit accessor invalid for 64-bit field");
1596         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1597                          "16-bit accessor invalid for 64-bit high field");
1598         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1599                          "16-bit accessor invalid for 32-bit field");
1600         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1601                          "16-bit accessor invalid for natural width field");
1602 }
1603
1604 static __always_inline void vmcs_check32(unsigned long field)
1605 {
1606         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1607                          "32-bit accessor invalid for 16-bit field");
1608         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1609                          "32-bit accessor invalid for natural width field");
1610 }
1611
1612 static __always_inline void vmcs_check64(unsigned long field)
1613 {
1614         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1615                          "64-bit accessor invalid for 16-bit field");
1616         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1617                          "64-bit accessor invalid for 64-bit high field");
1618         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1619                          "64-bit accessor invalid for 32-bit field");
1620         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1621                          "64-bit accessor invalid for natural width field");
1622 }
1623
1624 static __always_inline void vmcs_checkl(unsigned long field)
1625 {
1626         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1627                          "Natural width accessor invalid for 16-bit field");
1628         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1629                          "Natural width accessor invalid for 64-bit field");
1630         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1631                          "Natural width accessor invalid for 64-bit high field");
1632         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1633                          "Natural width accessor invalid for 32-bit field");
1634 }
1635
1636 static __always_inline unsigned long __vmcs_readl(unsigned long field)
1637 {
1638         unsigned long value;
1639
1640         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
1641                       : "=a"(value) : "d"(field) : "cc");
1642         return value;
1643 }
1644
1645 static __always_inline u16 vmcs_read16(unsigned long field)
1646 {
1647         vmcs_check16(field);
1648         return __vmcs_readl(field);
1649 }
1650
1651 static __always_inline u32 vmcs_read32(unsigned long field)
1652 {
1653         vmcs_check32(field);
1654         return __vmcs_readl(field);
1655 }
1656
1657 static __always_inline u64 vmcs_read64(unsigned long field)
1658 {
1659         vmcs_check64(field);
1660 #ifdef CONFIG_X86_64
1661         return __vmcs_readl(field);
1662 #else
1663         return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
1664 #endif
1665 }
1666
1667 static __always_inline unsigned long vmcs_readl(unsigned long field)
1668 {
1669         vmcs_checkl(field);
1670         return __vmcs_readl(field);
1671 }
1672
1673 static noinline void vmwrite_error(unsigned long field, unsigned long value)
1674 {
1675         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
1676                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
1677         dump_stack();
1678 }
1679
1680 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
1681 {
1682         u8 error;
1683
1684         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
1685                        : "=q"(error) : "a"(value), "d"(field) : "cc");
1686         if (unlikely(error))
1687                 vmwrite_error(field, value);
1688 }
1689
1690 static __always_inline void vmcs_write16(unsigned long field, u16 value)
1691 {
1692         vmcs_check16(field);
1693         __vmcs_writel(field, value);
1694 }
1695
1696 static __always_inline void vmcs_write32(unsigned long field, u32 value)
1697 {
1698         vmcs_check32(field);
1699         __vmcs_writel(field, value);
1700 }
1701
1702 static __always_inline void vmcs_write64(unsigned long field, u64 value)
1703 {
1704         vmcs_check64(field);
1705         __vmcs_writel(field, value);
1706 #ifndef CONFIG_X86_64
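        /*
         * On 32-bit hosts a 64-bit VMCS field is written as two 32-bit
         * halves: the low half at @field and the high half at @field + 1,
         * which is the "high" access-type encoding of the same field.
         */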
1707         asm volatile ("");
1708         __vmcs_writel(field+1, value >> 32);
1709 #endif
1710 }
1711
1712 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
1713 {
1714         vmcs_checkl(field);
1715         __vmcs_writel(field, value);
1716 }
1717
1718 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
1719 {
1720         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1721                          "vmcs_clear_bits does not support 64-bit fields");
1722         __vmcs_writel(field, __vmcs_readl(field) & ~mask);
1723 }
1724
1725 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
1726 {
1727         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1728                          "vmcs_set_bits does not support 64-bit fields");
1729         __vmcs_writel(field, __vmcs_readl(field) | mask);
1730 }
1731
1732 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
1733 {
1734         vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
1735 }
1736
1737 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
1738 {
1739         vmcs_write32(VM_ENTRY_CONTROLS, val);
1740         vmx->vm_entry_controls_shadow = val;
1741 }
1742
1743 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
1744 {
1745         if (vmx->vm_entry_controls_shadow != val)
1746                 vm_entry_controls_init(vmx, val);
1747 }
1748
1749 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
1750 {
1751         return vmx->vm_entry_controls_shadow;
1752 }
1753
1754
1755 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1756 {
1757         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
1758 }
1759
1760 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1761 {
1762         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
1763 }
1764
1765 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
1766 {
1767         vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
1768 }
1769
1770 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
1771 {
1772         vmcs_write32(VM_EXIT_CONTROLS, val);
1773         vmx->vm_exit_controls_shadow = val;
1774 }
1775
1776 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
1777 {
1778         if (vmx->vm_exit_controls_shadow != val)
1779                 vm_exit_controls_init(vmx, val);
1780 }
1781
1782 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
1783 {
1784         return vmx->vm_exit_controls_shadow;
1785 }
1786
1787
1788 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1789 {
1790         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
1791 }
1792
1793 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1794 {
1795         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
1796 }
1797
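/*
 * Per-vCPU guest segment cache: the selector, base, limit and AR bytes of
 * each segment register are cached to avoid repeated VMREADs of the same
 * field.  segment_cache.bitmask records which (segment, field) pairs hold
 * valid data; clearing the bitmask (or the VCPU_EXREG_SEGMENTS bit in
 * regs_avail) invalidates the cache.
 */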
1798 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
1799 {
1800         vmx->segment_cache.bitmask = 0;
1801 }
1802
1803 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
1804                                        unsigned field)
1805 {
1806         bool ret;
1807         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
1808
1809         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
1810                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
1811                 vmx->segment_cache.bitmask = 0;
1812         }
1813         ret = vmx->segment_cache.bitmask & mask;
1814         vmx->segment_cache.bitmask |= mask;
1815         return ret;
1816 }
1817
1818 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
1819 {
1820         u16 *p = &vmx->segment_cache.seg[seg].selector;
1821
1822         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
1823                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
1824         return *p;
1825 }
1826
1827 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
1828 {
1829         ulong *p = &vmx->segment_cache.seg[seg].base;
1830
1831         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
1832                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
1833         return *p;
1834 }
1835
1836 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
1837 {
1838         u32 *p = &vmx->segment_cache.seg[seg].limit;
1839
1840         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
1841                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
1842         return *p;
1843 }
1844
1845 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
1846 {
1847         u32 *p = &vmx->segment_cache.seg[seg].ar;
1848
1849         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
1850                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
1851         return *p;
1852 }
1853
1854 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1855 {
1856         u32 eb;
1857
1858         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1859              (1u << DB_VECTOR) | (1u << AC_VECTOR);
1860         if ((vcpu->guest_debug &
1861              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
1862             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
1863                 eb |= 1u << BP_VECTOR;
1864         if (to_vmx(vcpu)->rmode.vm86_active)
1865                 eb = ~0;
1866         if (enable_ept)
1867                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
1868
1869         /* When we are running a nested L2 guest and L1 specified for it a
1870          * certain exception bitmap, we must trap the same exceptions and pass
1871          * them to L1. When running L2, we will only handle the exceptions
1872          * specified above if L1 did not want them.
1873          */
1874         if (is_guest_mode(vcpu))
1875                 eb |= get_vmcs12(vcpu)->exception_bitmap;
1876
1877         vmcs_write32(EXCEPTION_BITMAP, eb);
1878 }
1879
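/*
 * MSR bitmap layout (per the Intel SDM): a 4-KByte page holding four
 * 1-KByte regions - the read bitmap for MSRs 0x00000000-0x00001fff at
 * offset 0x000, the read bitmap for MSRs 0xc0000000-0xc0001fff at offset
 * 0x400, and the corresponding write bitmaps at offsets 0x800 and 0xc00.
 * A set bit means the access causes a VM exit.  The helpers below test
 * only the write bitmaps; e.g. MSR_IA32_SPEC_CTRL (0x48) is bit 0x48 of
 * the region starting at byte offset 0x800.
 */
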
1880 /*
1881  * Check if MSR is intercepted for currently loaded MSR bitmap.
1882  */
1883 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
1884 {
1885         unsigned long *msr_bitmap;
1886         int f = sizeof(unsigned long);
1887
1888         if (!cpu_has_vmx_msr_bitmap())
1889                 return true;
1890
1891         msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
1892
1893         if (msr <= 0x1fff) {
1894                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
1895         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
1896                 msr &= 0x1fff;
1897                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
1898         }
1899
1900         return true;
1901 }
1902
1903 /*
1904  * Check if MSR is intercepted for L01 MSR bitmap.
1905  */
1906 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
1907 {
1908         unsigned long *msr_bitmap;
1909         int f = sizeof(unsigned long);
1910
1911         if (!cpu_has_vmx_msr_bitmap())
1912                 return true;
1913
1914         msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
1915
1916         if (msr <= 0x1fff) {
1917                 return !!test_bit(msr, msr_bitmap + 0x800 / f);
1918         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
1919                 msr &= 0x1fff;
1920                 return !!test_bit(msr, msr_bitmap + 0xc00 / f);
1921         }
1922
1923         return true;
1924 }
1925
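/*
 * MSRs that must differ between host and guest are switched atomically by
 * the CPU around VM entry/exit.  Most of them go through the VM-entry and
 * VM-exit MSR-load areas tracked in vmx->msr_autoload (bounded by
 * NR_AUTOLOAD_MSRS); a few "special" MSRs such as EFER and
 * PERF_GLOBAL_CTRL have dedicated VMCS fields and entry/exit controls and
 * are handled by the *_atomic_switch_msr_special() helpers instead.
 */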
1926 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1927                 unsigned long entry, unsigned long exit)
1928 {
1929         vm_entry_controls_clearbit(vmx, entry);
1930         vm_exit_controls_clearbit(vmx, exit);
1931 }
1932
1933 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1934 {
1935         unsigned i;
1936         struct msr_autoload *m = &vmx->msr_autoload;
1937
1938         switch (msr) {
1939         case MSR_EFER:
1940                 if (cpu_has_load_ia32_efer) {
1941                         clear_atomic_switch_msr_special(vmx,
1942                                         VM_ENTRY_LOAD_IA32_EFER,
1943                                         VM_EXIT_LOAD_IA32_EFER);
1944                         return;
1945                 }
1946                 break;
1947         case MSR_CORE_PERF_GLOBAL_CTRL:
1948                 if (cpu_has_load_perf_global_ctrl) {
1949                         clear_atomic_switch_msr_special(vmx,
1950                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1951                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1952                         return;
1953                 }
1954                 break;
1955         }
1956
1957         for (i = 0; i < m->nr; ++i)
1958                 if (m->guest[i].index == msr)
1959                         break;
1960
1961         if (i == m->nr)
1962                 return;
1963         --m->nr;
1964         m->guest[i] = m->guest[m->nr];
1965         m->host[i] = m->host[m->nr];
1966         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1967         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1968 }
1969
1970 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1971                 unsigned long entry, unsigned long exit,
1972                 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
1973                 u64 guest_val, u64 host_val)
1974 {
1975         vmcs_write64(guest_val_vmcs, guest_val);
1976         vmcs_write64(host_val_vmcs, host_val);
1977         vm_entry_controls_setbit(vmx, entry);
1978         vm_exit_controls_setbit(vmx, exit);
1979 }
1980
1981 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1982                                   u64 guest_val, u64 host_val)
1983 {
1984         unsigned i;
1985         struct msr_autoload *m = &vmx->msr_autoload;
1986
1987         switch (msr) {
1988         case MSR_EFER:
1989                 if (cpu_has_load_ia32_efer) {
1990                         add_atomic_switch_msr_special(vmx,
1991                                         VM_ENTRY_LOAD_IA32_EFER,
1992                                         VM_EXIT_LOAD_IA32_EFER,
1993                                         GUEST_IA32_EFER,
1994                                         HOST_IA32_EFER,
1995                                         guest_val, host_val);
1996                         return;
1997                 }
1998                 break;
1999         case MSR_CORE_PERF_GLOBAL_CTRL:
2000                 if (cpu_has_load_perf_global_ctrl) {
2001                         add_atomic_switch_msr_special(vmx,
2002                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2003                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2004                                         GUEST_IA32_PERF_GLOBAL_CTRL,
2005                                         HOST_IA32_PERF_GLOBAL_CTRL,
2006                                         guest_val, host_val);
2007                         return;
2008                 }
2009                 break;
2010         case MSR_IA32_PEBS_ENABLE:
2011                 /* PEBS needs a quiescent period after being disabled (to write
2012                  * a record).  Disabling PEBS through VMX MSR swapping doesn't
2013                  * provide that period, so a CPU could write host's record into
2014                  * guest's memory.
2015                  */
2016                 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
2017         }
2018
2019         for (i = 0; i < m->nr; ++i)
2020                 if (m->guest[i].index == msr)
2021                         break;
2022
2023         if (i == NR_AUTOLOAD_MSRS) {
2024                 printk_once(KERN_WARNING "Not enough msr switch entries. "
2025                                 "Can't add msr %x\n", msr);
2026                 return;
2027         } else if (i == m->nr) {
2028                 ++m->nr;
2029                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
2030                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
2031         }
2032
2033         m->guest[i].index = msr;
2034         m->guest[i].value = guest_val;
2035         m->host[i].index = msr;
2036         m->host[i].value = host_val;
2037 }
2038
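/*
 * Decide how guest EFER is switched: returns true if EFER should go
 * through the shared-MSR mechanism (guest_msrs[] / kvm_set_shared_msr()),
 * false if it is switched atomically via the VMCS controls or does not
 * need to be switched because the guest and host values already match.
 */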
2039 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
2040 {
2041         u64 guest_efer = vmx->vcpu.arch.efer;
2042         u64 ignore_bits = 0;
2043
2044         if (!enable_ept) {
2045                 /*
2046                  * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
2047                  * host CPUID is more efficient than testing guest CPUID
2048                  * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
2049                  */
2050                 if (boot_cpu_has(X86_FEATURE_SMEP))
2051                         guest_efer |= EFER_NX;
2052                 else if (!(guest_efer & EFER_NX))
2053                         ignore_bits |= EFER_NX;
2054         }
2055
2056         /*
2057          * LMA and LME handled by hardware; SCE meaningless outside long mode.
2058          */
2059         ignore_bits |= EFER_SCE;
2060 #ifdef CONFIG_X86_64
2061         ignore_bits |= EFER_LMA | EFER_LME;
2062         /* SCE is meaningful only in long mode on Intel */
2063         if (guest_efer & EFER_LMA)
2064                 ignore_bits &= ~(u64)EFER_SCE;
2065 #endif
2066
2067         clear_atomic_switch_msr(vmx, MSR_EFER);
2068
2069         /*
2070          * On EPT, we can't emulate NX, so we must switch EFER atomically.
2071          * On CPUs that support "load IA32_EFER", always switch EFER
2072          * atomically, since it's faster than switching it manually.
2073          */
2074         if (cpu_has_load_ia32_efer ||
2075             (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
2076                 if (!(guest_efer & EFER_LMA))
2077                         guest_efer &= ~EFER_LME;
2078                 if (guest_efer != host_efer)
2079                         add_atomic_switch_msr(vmx, MSR_EFER,
2080                                               guest_efer, host_efer);
2081                 return false;
2082         } else {
2083                 guest_efer &= ~ignore_bits;
2084                 guest_efer |= host_efer & ignore_bits;
2085
2086                 vmx->guest_msrs[efer_offset].data = guest_efer;
2087                 vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
2088
2089                 return true;
2090         }
2091 }
2092
2093 #ifdef CONFIG_X86_32
2094 /*
2095  * On 32-bit kernels, VM exits still load the FS and GS bases from the
2096  * VMCS rather than the segment table.  KVM uses this helper to figure
2097  * out the current bases to poke them into the VMCS before entry.
2098  */
2099 static unsigned long segment_base(u16 selector)
2100 {
2101         struct desc_struct *table;
2102         unsigned long v;
2103
2104         if (!(selector & ~SEGMENT_RPL_MASK))
2105                 return 0;
2106
2107         table = get_current_gdt_ro();
2108
2109         if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2110                 u16 ldt_selector = kvm_read_ldt();
2111
2112                 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
2113                         return 0;
2114
2115                 table = (struct desc_struct *)segment_base(ldt_selector);
2116         }
2117         v = get_desc_base(&table[selector >> 3]);
2118         return v;
2119 }
2120 #endif
2121
2122 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
2123 {
2124         struct vcpu_vmx *vmx = to_vmx(vcpu);
2125         int i;
2126
2127         if (vmx->host_state.loaded)
2128                 return;
2129
2130         vmx->host_state.loaded = 1;
2131         /*
2132          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
2133          * allow segment selectors with cpl > 0 or ti == 1.
2134          */
2135         vmx->host_state.ldt_sel = kvm_read_ldt();
2136         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
2137         savesegment(fs, vmx->host_state.fs_sel);
2138         if (!(vmx->host_state.fs_sel & 7)) {
2139                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
2140                 vmx->host_state.fs_reload_needed = 0;
2141         } else {
2142                 vmcs_write16(HOST_FS_SELECTOR, 0);
2143                 vmx->host_state.fs_reload_needed = 1;
2144         }
2145         savesegment(gs, vmx->host_state.gs_sel);
2146         if (!(vmx->host_state.gs_sel & 7))
2147                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
2148         else {
2149                 vmcs_write16(HOST_GS_SELECTOR, 0);
2150                 vmx->host_state.gs_ldt_reload_needed = 1;
2151         }
2152
2153 #ifdef CONFIG_X86_64
2154         savesegment(ds, vmx->host_state.ds_sel);
2155         savesegment(es, vmx->host_state.es_sel);
2156 #endif
2157
2158 #ifdef CONFIG_X86_64
2159         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
2160         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
2161 #else
2162         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
2163         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
2164 #endif
2165
2166 #ifdef CONFIG_X86_64
2167         rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2168         if (is_long_mode(&vmx->vcpu))
2169                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2170 #endif
2171         if (boot_cpu_has(X86_FEATURE_MPX))
2172                 rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2173         for (i = 0; i < vmx->save_nmsrs; ++i)
2174                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2175                                    vmx->guest_msrs[i].data,
2176                                    vmx->guest_msrs[i].mask);
2177 }
2178
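/*
 * Undo vmx_save_host_state(): restore the host segment registers, LDT,
 * KERNEL_GS_BASE, BNDCFGS and the fixmap GDT that were saved or clobbered
 * while guest state was loaded.  Callers are expected to run with
 * preemption disabled (see vmx_load_host_state() below).
 */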
2179 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
2180 {
2181         if (!vmx->host_state.loaded)
2182                 return;
2183
2184         ++vmx->vcpu.stat.host_state_reload;
2185         vmx->host_state.loaded = 0;
2186 #ifdef CONFIG_X86_64
2187         if (is_long_mode(&vmx->vcpu))
2188                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2189 #endif
2190         if (vmx->host_state.gs_ldt_reload_needed) {
2191                 kvm_load_ldt(vmx->host_state.ldt_sel);
2192 #ifdef CONFIG_X86_64
2193                 load_gs_index(vmx->host_state.gs_sel);
2194 #else
2195                 loadsegment(gs, vmx->host_state.gs_sel);
2196 #endif
2197         }
2198         if (vmx->host_state.fs_reload_needed)
2199                 loadsegment(fs, vmx->host_state.fs_sel);
2200 #ifdef CONFIG_X86_64
2201         if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
2202                 loadsegment(ds, vmx->host_state.ds_sel);
2203                 loadsegment(es, vmx->host_state.es_sel);
2204         }
2205 #endif
2206         invalidate_tss_limit();
2207 #ifdef CONFIG_X86_64
2208         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2209 #endif
2210         if (vmx->host_state.msr_host_bndcfgs)
2211                 wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2212         load_fixmap_gdt(raw_smp_processor_id());
2213 }
2214
2215 static void vmx_load_host_state(struct vcpu_vmx *vmx)
2216 {
2217         preempt_disable();
2218         __vmx_load_host_state(vmx);
2219         preempt_enable();
2220 }
2221
2222 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2223 {
2224         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2225         struct pi_desc old, new;
2226         unsigned int dest;
2227
2228         /*
2229          * In case of hot-plug or hot-unplug, we may have to undo
2230          * vmx_vcpu_pi_put even if there is no assigned device.  And we
2231          * always keep PI.NDST up to date for simplicity: it makes the
2232          * code easier, and CPU migration is not a fast path.
2233          */
2234         if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
2235                 return;
2236
2237         /*
2238          * First handle the simple case where no cmpxchg is necessary; just
2239          * allow posting non-urgent interrupts.
2240          *
2241          * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
2242          * PI.NDST: pi_post_block will do it for us and the wakeup_handler
2243          * expects the VCPU to be on the blocked_vcpu_list that matches
2244          * PI.NDST.
2245          */
2246         if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
2247             vcpu->cpu == cpu) {
2248                 pi_clear_sn(pi_desc);
2249                 return;
2250         }
2251
2252         /* The full case.  */
2253         do {
2254                 old.control = new.control = pi_desc->control;
2255
2256                 dest = cpu_physical_id(cpu);
2257
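                /*
                 * PI.NDST holds the notification destination: the full
                 * 32-bit APIC ID in x2APIC mode, or the xAPIC ID in
                 * bits 15:8 otherwise.
                 */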
2258                 if (x2apic_enabled())
2259                         new.ndst = dest;
2260                 else
2261                         new.ndst = (dest << 8) & 0xFF00;
2262
2263                 new.sn = 0;
2264         } while (cmpxchg64(&pi_desc->control, old.control,
2265                            new.control) != old.control);
2266 }
2267
2268 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
2269 {
2270         vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
2271         vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
2272 }
2273
2274 /*
2275  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
2276  * vcpu mutex is already taken.
2277  */
2278 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2279 {
2280         struct vcpu_vmx *vmx = to_vmx(vcpu);
2281         bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
2282
2283         if (!already_loaded) {
2284                 loaded_vmcs_clear(vmx->loaded_vmcs);
2285                 local_irq_disable();
2286                 crash_disable_local_vmclear(cpu);
2287
2288                 /*
2289                  * Read loaded_vmcs->cpu should be before fetching
2290                  * loaded_vmcs->loaded_vmcss_on_cpu_link.
2291                  * See the comments in __loaded_vmcs_clear().
2292                  */
2293                 smp_rmb();
2294
2295                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
2296                          &per_cpu(loaded_vmcss_on_cpu, cpu));
2297                 crash_enable_local_vmclear(cpu);
2298                 local_irq_enable();
2299         }
2300
2301         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
2302                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
2303                 vmcs_load(vmx->loaded_vmcs->vmcs);
2304                 indirect_branch_prediction_barrier();
2305         }
2306
2307         if (!already_loaded) {
2308                 void *gdt = get_current_gdt_ro();
2309                 unsigned long sysenter_esp;
2310
2311                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2312
2313                 /*
2314                  * Linux uses per-cpu TSS and GDT, so set these when switching
2315                  * processors.  See 22.2.4.
2316                  */
2317                 vmcs_writel(HOST_TR_BASE,
2318                             (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
2319                 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
2320
2321                 /*
2322                  * A VM exit restores the host TR limit to 0x67.  This is
2323                  * okay, since 0x67 covers everything except the IO bitmap,
2324                  * and we have code to handle the IO bitmap being lost
2325                  * after a VM exit.
2326                  */
2327                 BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
2328
2329                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
2330                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
2331
2332                 vmx->loaded_vmcs->cpu = cpu;
2333         }
2334
2335         /* Setup TSC multiplier */
2336         if (kvm_has_tsc_control &&
2337             vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
2338                 decache_tsc_multiplier(vmx);
2339
2340         vmx_vcpu_pi_load(vcpu, cpu);
2341         vmx->host_pkru = read_pkru();
2342         vmx->host_debugctlmsr = get_debugctlmsr();
2343 }
2344
2345 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
2346 {
2347         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2348
2349         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
2350                 !irq_remapping_cap(IRQ_POSTING_CAP)  ||
2351                 !kvm_vcpu_apicv_active(vcpu))
2352                 return;
2353
2354         /* Set SN when the vCPU is preempted */
2355         if (vcpu->preempted)
2356                 pi_set_sn(pi_desc);
2357 }
2358
2359 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
2360 {
2361         vmx_vcpu_pi_put(vcpu);
2362
2363         __vmx_load_host_state(to_vmx(vcpu));
2364 }
2365
2366 static bool emulation_required(struct kvm_vcpu *vcpu)
2367 {
2368         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
2369 }
2370
2371 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
2372
2373 /*
2374  * Return the cr0 value that a nested guest would read. This is a combination
2375  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
2376  * its hypervisor (cr0_read_shadow).
2377  */
2378 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
2379 {
2380         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
2381                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
2382 }
2383 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
2384 {
2385         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
2386                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
2387 }
2388
2389 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
2390 {
2391         unsigned long rflags, save_rflags;
2392
2393         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
2394                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2395                 rflags = vmcs_readl(GUEST_RFLAGS);
2396                 if (to_vmx(vcpu)->rmode.vm86_active) {
2397                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
2398                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
2399                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
2400                 }
2401                 to_vmx(vcpu)->rflags = rflags;
2402         }
2403         return to_vmx(vcpu)->rflags;
2404 }
2405
2406 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2407 {
2408         unsigned long old_rflags = vmx_get_rflags(vcpu);
2409
2410         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2411         to_vmx(vcpu)->rflags = rflags;
2412         if (to_vmx(vcpu)->rmode.vm86_active) {
2413                 to_vmx(vcpu)->rmode.save_rflags = rflags;
2414                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
2415         }
2416         vmcs_writel(GUEST_RFLAGS, rflags);
2417
2418         if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
2419                 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
2420 }
2421
2422 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
2423 {
2424         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2425         int ret = 0;
2426
2427         if (interruptibility & GUEST_INTR_STATE_STI)
2428                 ret |= KVM_X86_SHADOW_INT_STI;
2429         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
2430                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
2431
2432         return ret;
2433 }
2434
2435 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
2436 {
2437         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2438         u32 interruptibility = interruptibility_old;
2439
2440         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
2441
2442         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
2443                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
2444         else if (mask & KVM_X86_SHADOW_INT_STI)
2445                 interruptibility |= GUEST_INTR_STATE_STI;
2446
2447         if (interruptibility != interruptibility_old)
2448                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
2449 }
2450
2451 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
2452 {
2453         unsigned long rip;
2454
2455         rip = kvm_rip_read(vcpu);
2456         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
2457         kvm_rip_write(vcpu, rip);
2458
2459         /* skipping an emulated instruction also counts */
2460         vmx_set_interrupt_shadow(vcpu, 0);
2461 }
2462
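/*
 * Emulate a VM exit from L2 to L1 for an exception that L1 intercepts:
 * build the VM-exit interruption-information field (vector, type, error
 * code, NMI unblocking) and deliver EXIT_REASON_EXCEPTION_NMI to L1 with
 * the given exit qualification.
 */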
2463 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
2464                                                unsigned long exit_qual)
2465 {
2466         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2467         unsigned int nr = vcpu->arch.exception.nr;
2468         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2469
2470         if (vcpu->arch.exception.has_error_code) {
2471                 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
2472                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2473         }
2474
2475         if (kvm_exception_is_soft(nr))
2476                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2477         else
2478                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2479
2480         if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
2481             vmx_get_nmi_mask(vcpu))
2482                 intr_info |= INTR_INFO_UNBLOCK_NMI;
2483
2484         nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
2485 }
2486
2487 /*
2488  * KVM wants to inject page faults that it received into the guest. This
2489  * function checks whether, in a nested guest, they go to L1 or to L2.
2490  */
2491 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
2492 {
2493         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2494         unsigned int nr = vcpu->arch.exception.nr;
2495
2496         if (nr == PF_VECTOR) {
2497                 if (vcpu->arch.exception.nested_apf) {
2498                         *exit_qual = vcpu->arch.apf.nested_apf_token;
2499                         return 1;
2500                 }
2501                 /*
2502                  * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
2503                  * The fix is to add the ancillary datum (CR2 or DR6) to structs
2504                  * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
2505                  * can be written only when inject_pending_event runs.  This should be
2506                  * conditional on a new capability---if the capability is disabled,
2507                  * kvm_multiple_exception would write the ancillary information to
2508                  * CR2 or DR6, for backwards ABI-compatibility.
2509                  */
2510                 if (nested_vmx_is_page_fault_vmexit(vmcs12,
2511                                                     vcpu->arch.exception.error_code)) {
2512                         *exit_qual = vcpu->arch.cr2;
2513                         return 1;
2514                 }
2515         } else {
2516                 if (vmcs12->exception_bitmap & (1u << nr)) {
2517                         if (nr == DB_VECTOR)
2518                                 *exit_qual = vcpu->arch.dr6;
2519                         else
2520                                 *exit_qual = 0;
2521                         return 1;
2522                 }
2523         }
2524
2525         return 0;
2526 }
2527
2528 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
2529 {
2530         struct vcpu_vmx *vmx = to_vmx(vcpu);
2531         unsigned nr = vcpu->arch.exception.nr;
2532         bool has_error_code = vcpu->arch.exception.has_error_code;
2533         u32 error_code = vcpu->arch.exception.error_code;
2534         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2535
2536         if (has_error_code) {
2537                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
2538                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2539         }
2540
2541         if (vmx->rmode.vm86_active) {
2542                 int inc_eip = 0;
2543                 if (kvm_exception_is_soft(nr))
2544                         inc_eip = vcpu->arch.event_exit_inst_len;
2545                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
2546                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2547                 return;
2548         }
2549
2550         if (kvm_exception_is_soft(nr)) {
2551                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2552                              vmx->vcpu.arch.event_exit_inst_len);
2553                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2554         } else
2555                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2556
2557         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
2558 }
2559
2560 static bool vmx_rdtscp_supported(void)
2561 {
2562         return cpu_has_vmx_rdtscp();
2563 }
2564
2565 static bool vmx_invpcid_supported(void)
2566 {
2567         return cpu_has_vmx_invpcid() && enable_ept;
2568 }
2569
2570 /*
2571  * Swap MSR entry in host/guest MSR entry array.
2572  */
2573 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
2574 {
2575         struct shared_msr_entry tmp;
2576
2577         tmp = vmx->guest_msrs[to];
2578         vmx->guest_msrs[to] = vmx->guest_msrs[from];
2579         vmx->guest_msrs[from] = tmp;
2580 }
2581
2582 /*
2583  * Set up the vmcs to automatically save and restore system
2584  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
2585  * mode, as fiddling with msrs is very expensive.
2586  */
2587 static void setup_msrs(struct vcpu_vmx *vmx)
2588 {
2589         int save_nmsrs, index;
2590
2591         save_nmsrs = 0;
2592 #ifdef CONFIG_X86_64
2593         if (is_long_mode(&vmx->vcpu)) {
2594                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
2595                 if (index >= 0)
2596                         move_msr_up(vmx, index, save_nmsrs++);
2597                 index = __find_msr_index(vmx, MSR_LSTAR);
2598                 if (index >= 0)
2599                         move_msr_up(vmx, index, save_nmsrs++);
2600                 index = __find_msr_index(vmx, MSR_CSTAR);
2601                 if (index >= 0)
2602                         move_msr_up(vmx, index, save_nmsrs++);
2603                 index = __find_msr_index(vmx, MSR_TSC_AUX);
2604                 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
2605                         move_msr_up(vmx, index, save_nmsrs++);
2606                 /*
2607                  * MSR_STAR is only needed on long mode guests, and only
2608                  * if efer.sce is enabled.
2609                  */
2610                 index = __find_msr_index(vmx, MSR_STAR);
2611                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
2612                         move_msr_up(vmx, index, save_nmsrs++);
2613         }
2614 #endif
2615         index = __find_msr_index(vmx, MSR_EFER);
2616         if (index >= 0 && update_transition_efer(vmx, index))
2617                 move_msr_up(vmx, index, save_nmsrs++);
2618
2619         vmx->save_nmsrs = save_nmsrs;
2620
2621         if (cpu_has_vmx_msr_bitmap())
2622                 vmx_update_msr_bitmap(&vmx->vcpu);
2623 }
2624
2625 /*
2626  * Reads and returns the guest's timestamp counter "register":
2627  * guest_tsc = ((host_tsc * tsc_multiplier) >> 48) + tsc_offset
2628  * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
2629  */
2630 static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
2631 {
2632         u64 host_tsc, tsc_offset;
2633
2634         host_tsc = rdtsc();
2635         tsc_offset = vmcs_read64(TSC_OFFSET);
2636         return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
2637 }
2638
2639 /*
2640  * writes 'offset' into guest's timestamp counter offset register
2641  */
2642 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2643 {
2644         if (is_guest_mode(vcpu)) {
2645                 /*
2646                  * We're here if L1 chose not to trap WRMSR to TSC. According
2647                  * to the spec, this should set L1's TSC; the offset that L1
2648                  * set for L2 remains unchanged, and still needs to be added
2649                  * to the newly set TSC to get L2's TSC.
2650                  */
2651                 struct vmcs12 *vmcs12;
2652                 /* recalculate vmcs02.TSC_OFFSET: */
2653                 vmcs12 = get_vmcs12(vcpu);
2654                 vmcs_write64(TSC_OFFSET, offset +
2655                         (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
2656                          vmcs12->tsc_offset : 0));
2657         } else {
2658                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2659                                            vmcs_read64(TSC_OFFSET), offset);
2660                 vmcs_write64(TSC_OFFSET, offset);
2661         }
2662 }
2663
2664 /*
2665  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
2666  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
2667  * all guests if the "nested" module option is off, and can also be disabled
2668  * for a single guest by disabling its VMX cpuid bit.
2669  */
2670 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2671 {
2672         return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
2673 }
2674
2675 /*
2676  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
2677  * returned for the various VMX controls MSRs when nested VMX is enabled.
2678  * The same values should also be used to verify that vmcs12 control fields are
2679  * valid during nested entry from L1 to L2.
2680  * Each of these control msrs has a low and high 32-bit half: A low bit is on
2681  * if the corresponding bit in the (32-bit) control field *must* be on, and a
2682  * bit in the high half is on if the corresponding bit in the control field
2683  * may be on. See also vmx_control_verify().
2684  */
2685 static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2686 {
2687         /*
2688          * Note that as a general rule, the high half of the MSRs (bits in
2689          * the control fields which may be 1) should be initialized by the
2690          * intersection of the underlying hardware's MSR (i.e., features which
2691          * can be supported) and the list of features we want to expose -
2692          * because they are known to be properly supported in our code.
2693          * Also, usually, the low half of the MSRs (bits which must be 1) can
2694          * be set to 0, meaning that L1 may turn off any of these bits. The
2695          * reason is that if one of these bits is necessary, it will appear
2696          * reason is that if one of these bits is necessary, it will be set
2697          * in vmcs01, and prepare_vmcs02, which bitwise-ORs the control
2698          * fields of vmcs01 and vmcs12, will keep it set in vmcs02 - and
2699          * These rules have exceptions below.
2700          */
2701
2702         /* pin-based controls */
2703         rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
2704                 vmx->nested.nested_vmx_pinbased_ctls_low,
2705                 vmx->nested.nested_vmx_pinbased_ctls_high);
2706         vmx->nested.nested_vmx_pinbased_ctls_low |=
2707                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2708         vmx->nested.nested_vmx_pinbased_ctls_high &=
2709                 PIN_BASED_EXT_INTR_MASK |
2710                 PIN_BASED_NMI_EXITING |
2711                 PIN_BASED_VIRTUAL_NMIS;
2712         vmx->nested.nested_vmx_pinbased_ctls_high |=
2713                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2714                 PIN_BASED_VMX_PREEMPTION_TIMER;
2715         if (kvm_vcpu_apicv_active(&vmx->vcpu))
2716                 vmx->nested.nested_vmx_pinbased_ctls_high |=
2717                         PIN_BASED_POSTED_INTR;
2718
2719         /* exit controls */
2720         rdmsr(MSR_IA32_VMX_EXIT_CTLS,
2721                 vmx->nested.nested_vmx_exit_ctls_low,
2722                 vmx->nested.nested_vmx_exit_ctls_high);
2723         vmx->nested.nested_vmx_exit_ctls_low =
2724                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
2725
2726         vmx->nested.nested_vmx_exit_ctls_high &=
2727 #ifdef CONFIG_X86_64
2728                 VM_EXIT_HOST_ADDR_SPACE_SIZE |
2729 #endif
2730                 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
2731         vmx->nested.nested_vmx_exit_ctls_high |=
2732                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
2733                 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
2734                 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
2735
2736         if (kvm_mpx_supported())
2737                 vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
2738
2739         /* We support free control of debug control saving. */
2740         vmx->nested.nested_vmx_exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
2741
2742         /* entry controls */
2743         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
2744                 vmx->nested.nested_vmx_entry_ctls_low,
2745                 vmx->nested.nested_vmx_entry_ctls_high);
2746         vmx->nested.nested_vmx_entry_ctls_low =
2747                 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
2748         vmx->nested.nested_vmx_entry_ctls_high &=
2749 #ifdef CONFIG_X86_64
2750                 VM_ENTRY_IA32E_MODE |
2751 #endif
2752                 VM_ENTRY_LOAD_IA32_PAT;
2753         vmx->nested.nested_vmx_entry_ctls_high |=
2754                 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
2755         if (kvm_mpx_supported())
2756                 vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
2757
2758         /* We support free control of debug control loading. */
2759         vmx->nested.nested_vmx_entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
2760
2761         /* cpu-based controls */
2762         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
2763                 vmx->nested.nested_vmx_procbased_ctls_low,
2764                 vmx->nested.nested_vmx_procbased_ctls_high);
2765         vmx->nested.nested_vmx_procbased_ctls_low =
2766                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2767         vmx->nested.nested_vmx_procbased_ctls_high &=
2768                 CPU_BASED_VIRTUAL_INTR_PENDING |
2769                 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
2770                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
2771                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
2772                 CPU_BASED_CR3_STORE_EXITING |
2773 #ifdef CONFIG_X86_64
2774                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
2775 #endif
2776                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
2777                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
2778                 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
2779                 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
2780                 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2781         /*
2782          * We can allow some features even when not supported by the
2783          * hardware. For example, L1 can specify an MSR bitmap - and we
2784          * can use it to avoid exits to L1 - even when L0 runs L2
2785          * without MSR bitmaps.
2786          */
2787         vmx->nested.nested_vmx_procbased_ctls_high |=
2788                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2789                 CPU_BASED_USE_MSR_BITMAPS;
2790
2791         /* We support free control of CR3 access interception. */
2792         vmx->nested.nested_vmx_procbased_ctls_low &=
2793                 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
2794
2795         /*
2796          * secondary cpu-based controls.  Do not include those that
2797          * depend on CPUID bits, they are added later by vmx_cpuid_update.
2798          */
2799         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
2800                 vmx->nested.nested_vmx_secondary_ctls_low,
2801                 vmx->nested.nested_vmx_secondary_ctls_high);
2802         vmx->nested.nested_vmx_secondary_ctls_low = 0;
2803         vmx->nested.nested_vmx_secondary_ctls_high &=
2804                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2805                 SECONDARY_EXEC_DESC |
2806                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2807                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2808                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2809                 SECONDARY_EXEC_WBINVD_EXITING;
2810
2811         if (enable_ept) {
2812                 /* nested EPT: emulate EPT also to L1 */
2813                 vmx->nested.nested_vmx_secondary_ctls_high |=
2814                         SECONDARY_EXEC_ENABLE_EPT;
2815                 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2816                          VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
2817                 if (cpu_has_vmx_ept_execute_only())
2818                         vmx->nested.nested_vmx_ept_caps |=
2819                                 VMX_EPT_EXECUTE_ONLY_BIT;
2820                 vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
2821                 vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
2822                         VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
2823                         VMX_EPT_1GB_PAGE_BIT;
2824                 if (enable_ept_ad_bits) {
2825                         vmx->nested.nested_vmx_secondary_ctls_high |=
2826                                 SECONDARY_EXEC_ENABLE_PML;
2827                         vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
2828                 }
2829         }
2830
2831         if (cpu_has_vmx_vmfunc()) {
2832                 vmx->nested.nested_vmx_secondary_ctls_high |=
2833                         SECONDARY_EXEC_ENABLE_VMFUNC;
2834                 /*
2835                  * Advertise EPTP switching regardless of hardware
2836                  * support, since we emulate it.
2837                  */
2838                 if (enable_ept)
2839                         vmx->nested.nested_vmx_vmfunc_controls =
2840                                 VMX_VMFUNC_EPTP_SWITCHING;
2841         }
2842
2843         /*
2844          * Old versions of KVM use the single-context version without
2845          * checking for support, so declare that it is supported even
2846          * though it is treated as global context.  The alternative is
2847          * not failing the single-context invvpid, and it is worse.
2848          */
2849         if (enable_vpid) {
2850                 vmx->nested.nested_vmx_secondary_ctls_high |=
2851                         SECONDARY_EXEC_ENABLE_VPID;
2852                 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
2853                         VMX_VPID_EXTENT_SUPPORTED_MASK;
2854         }
2855
2856         if (enable_unrestricted_guest)
2857                 vmx->nested.nested_vmx_secondary_ctls_high |=
2858                         SECONDARY_EXEC_UNRESTRICTED_GUEST;
2859
2860         /* miscellaneous data */
2861         rdmsr(MSR_IA32_VMX_MISC,
2862                 vmx->nested.nested_vmx_misc_low,
2863                 vmx->nested.nested_vmx_misc_high);
2864         vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
2865         vmx->nested.nested_vmx_misc_low |=
2866                 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
2867                 VMX_MISC_ACTIVITY_HLT;
2868         vmx->nested.nested_vmx_misc_high = 0;
2869
2870         /*
2871          * This MSR reports some information about VMX support. We
2872          * should return information about the VMX we emulate for the
2873          * guest, and the VMCS structure we give it - not about the
2874          * VMX support of the underlying hardware.
2875          */
2876         vmx->nested.nested_vmx_basic =
2877                 VMCS12_REVISION |
2878                 VMX_BASIC_TRUE_CTLS |
2879                 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
2880                 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
2881
2882         if (cpu_has_vmx_basic_inout())
2883                 vmx->nested.nested_vmx_basic |= VMX_BASIC_INOUT;
2884
2885         /*
2886          * These MSRs specify bits which the guest must keep fixed on
2887          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
2888          * We picked the standard core2 setting.
2889          */
2890 #define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
2891 #define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
2892         vmx->nested.nested_vmx_cr0_fixed0 = VMXON_CR0_ALWAYSON;
2893         vmx->nested.nested_vmx_cr4_fixed0 = VMXON_CR4_ALWAYSON;
2894
2895         /* These MSRs specify bits which the guest must keep fixed off. */
2896         rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx->nested.nested_vmx_cr0_fixed1);
2897         rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1);
2898
2899         /* highest index: VMX_PREEMPTION_TIMER_VALUE */
2900         vmx->nested.nested_vmx_vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
2901 }
2902
2903 /*
2904  * if fixed0[i] == 1: val[i] must be 1
2905  * if fixed1[i] == 0: val[i] must be 0
2906  */
2907 static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
2908 {
2909         return ((val & fixed1) | fixed0) == val;
2910 }
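/*
 * Worked example (illustrative values, not taken from real hardware): with
 * fixed0 = 0b0101 and fixed1 = 0b0111, bits 0 and 2 must be 1 and bit 3 must
 * be 0, so val = 0b0101 and val = 0b0111 pass, while val = 0b0001 (bit 2
 * cleared) and val = 0b1101 (bit 3 set) fail.  This is the check used, for
 * instance, to validate a guest CR0/CR4 value against the FIXED0/FIXED1
 * values read above.
 */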
2911
2912 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
2913 {
2914         return fixed_bits_valid(control, low, high);
2915 }
2916
2917 static inline u64 vmx_control_msr(u32 low, u32 high)
2918 {
2919         return low | ((u64)high << 32);
2920 }
2921
2922 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
2923 {
2924         superset &= mask;
2925         subset &= mask;
2926
2927         return (superset | subset) == superset;
2928 }
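/*
 * Illustrative sketch, not compiled into the driver: the low 32 bits of a
 * "true" capability MSR hold the must-be-1 (allowed-0) settings and the high
 * 32 bits hold the allowed-1 settings, so a proposed control word is valid
 * iff vmx_control_verify() holds, and a restored capability MSR is valid iff
 * the is_bitwise_subset() checks in vmx_restore_control_msr() below hold.
 * The constants here are made up for the example.
 */
#if 0
static void vmx_control_verify_example(void)
{
	u32 low = 0x16;		/* bits 1, 2 and 4 must be 1 */
	u32 high = 0x7f;	/* only bits 0-6 may be 1 */

	WARN_ON(!vmx_control_verify(0x16, low, high)); /* ok: all fixed bits respected */
	WARN_ON(vmx_control_verify(0x12, low, high));  /* bad: clears must-be-1 bit 2 */
	WARN_ON(vmx_control_verify(0x96, low, high));  /* bad: sets disallowed bit 7 */
}
#endif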
2929
2930 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
2931 {
2932         const u64 feature_and_reserved =
2933                 /* feature (except bit 48; see below) */
2934                 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
2935                 /* reserved */
2936                 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
2937         u64 vmx_basic = vmx->nested.nested_vmx_basic;
2938
2939         if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
2940                 return -EINVAL;
2941
2942         /*
2943          * KVM does not emulate a version of VMX that constrains physical
2944          * addresses of VMX structures (e.g. VMCS) to 32-bits.
2945          */
2946         if (data & BIT_ULL(48))
2947                 return -EINVAL;
2948
2949         if (vmx_basic_vmcs_revision_id(vmx_basic) !=
2950             vmx_basic_vmcs_revision_id(data))
2951                 return -EINVAL;
2952
2953         if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
2954                 return -EINVAL;
2955
2956         vmx->nested.nested_vmx_basic = data;
2957         return 0;
2958 }
2959
2960 static int
2961 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
2962 {
2963         u64 supported;
2964         u32 *lowp, *highp;
2965
2966         switch (msr_index) {
2967         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2968                 lowp = &vmx->nested.nested_vmx_pinbased_ctls_low;
2969                 highp = &vmx->nested.nested_vmx_pinbased_ctls_high;
2970                 break;
2971         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2972                 lowp = &vmx->nested.nested_vmx_procbased_ctls_low;
2973                 highp = &vmx->nested.nested_vmx_procbased_ctls_high;
2974                 break;
2975         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2976                 lowp = &vmx->nested.nested_vmx_exit_ctls_low;
2977                 highp = &vmx->nested.nested_vmx_exit_ctls_high;
2978                 break;
2979         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2980                 lowp = &vmx->nested.nested_vmx_entry_ctls_low;
2981                 highp = &vmx->nested.nested_vmx_entry_ctls_high;
2982                 break;
2983         case MSR_IA32_VMX_PROCBASED_CTLS2:
2984                 lowp = &vmx->nested.nested_vmx_secondary_ctls_low;
2985                 highp = &vmx->nested.nested_vmx_secondary_ctls_high;
2986                 break;
2987         default:
2988                 BUG();
2989         }
2990
2991         supported = vmx_control_msr(*lowp, *highp);
2992
2993         /* Check must-be-1 bits are still 1. */
2994         if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
2995                 return -EINVAL;
2996
2997         /* Check must-be-0 bits are still 0. */
2998         if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
2999                 return -EINVAL;
3000
3001         *lowp = data;
3002         *highp = data >> 32;
3003         return 0;
3004 }
3005
3006 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
3007 {
3008         const u64 feature_and_reserved_bits =
3009                 /* feature */
3010                 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
3011                 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
3012                 /* reserved */
3013                 GENMASK_ULL(13, 9) | BIT_ULL(31);
3014         u64 vmx_misc;
3015
3016         vmx_misc = vmx_control_msr(vmx->nested.nested_vmx_misc_low,
3017                                    vmx->nested.nested_vmx_misc_high);
3018
3019         if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
3020                 return -EINVAL;
3021
3022         if ((vmx->nested.nested_vmx_pinbased_ctls_high &
3023              PIN_BASED_VMX_PREEMPTION_TIMER) &&
3024             vmx_misc_preemption_timer_rate(data) !=
3025             vmx_misc_preemption_timer_rate(vmx_misc))
3026                 return -EINVAL;
3027
3028         if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
3029                 return -EINVAL;
3030
3031         if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
3032                 return -EINVAL;
3033
3034         if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
3035                 return -EINVAL;
3036
3037         vmx->nested.nested_vmx_misc_low = data;
3038         vmx->nested.nested_vmx_misc_high = data >> 32;
3039         return 0;
3040 }
3041
3042 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
3043 {
3044         u64 vmx_ept_vpid_cap;
3045
3046         vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.nested_vmx_ept_caps,
3047                                            vmx->nested.nested_vmx_vpid_caps);
3048
3049         /* Every bit is either reserved or a feature bit. */
3050         if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
3051                 return -EINVAL;
3052
3053         vmx->nested.nested_vmx_ept_caps = data;
3054         vmx->nested.nested_vmx_vpid_caps = data >> 32;
3055         return 0;
3056 }
3057
3058 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3059 {
3060         u64 *msr;
3061
3062         switch (msr_index) {
3063         case MSR_IA32_VMX_CR0_FIXED0:
3064                 msr = &vmx->nested.nested_vmx_cr0_fixed0;
3065                 break;
3066         case MSR_IA32_VMX_CR4_FIXED0:
3067                 msr = &vmx->nested.nested_vmx_cr4_fixed0;
3068                 break;
3069         default:
3070                 BUG();
3071         }
3072
3073         /*
3074          * 1 bits (which indicate bits that must be 1 during VMX operation)
3075          * must still be 1 in the restored value.
3076          */
3077         if (!is_bitwise_subset(data, *msr, -1ULL))
3078                 return -EINVAL;
3079
3080         *msr = data;
3081         return 0;
3082 }
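/*
 * Worked example (illustrative): if the current CR0_FIXED0 value is the
 * VMXON_CR0_ALWAYSON set above (PE | PG | NE), restoring PE | PG | NE | MP
 * is accepted because it only adds must-be-1 bits, while restoring PE | NE
 * is rejected because it would clear the must-be-1 PG bit.
 */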
3083
3084 /*
3085  * Called when userspace is restoring VMX MSRs.
3086  *
3087  * Returns 0 on success, non-0 otherwise.
3088  */
3089 static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
3090 {
3091         struct vcpu_vmx *vmx = to_vmx(vcpu);
3092
3093         switch (msr_index) {
3094         case MSR_IA32_VMX_BASIC:
3095                 return vmx_restore_vmx_basic(vmx, data);
3096         case MSR_IA32_VMX_PINBASED_CTLS:
3097         case MSR_IA32_VMX_PROCBASED_CTLS:
3098         case MSR_IA32_VMX_EXIT_CTLS:
3099         case MSR_IA32_VMX_ENTRY_CTLS:
3100                 /*
3101                  * The "non-true" VMX capability MSRs are generated from the
3102                  * "true" MSRs, so we do not support restoring them directly.
3103                  *
3104                  * If userspace wants to emulate VMX_BASIC[55]=0, userspace
3105                  * should restore the "true" MSRs with the must-be-1 bits
3106                  * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
3107                  * DEFAULT SETTINGS".
3108                  */
3109                 return -EINVAL;
3110         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3111         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3112         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3113         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3114         case MSR_IA32_VMX_PROCBASED_CTLS2:
3115                 return vmx_restore_control_msr(vmx, msr_index, data);
3116         case MSR_IA32_VMX_MISC:
3117                 return vmx_restore_vmx_misc(vmx, data);
3118         case MSR_IA32_VMX_CR0_FIXED0:
3119         case MSR_IA32_VMX_CR4_FIXED0:
3120                 return vmx_restore_fixed0_msr(vmx, msr_index, data);
3121         case MSR_IA32_VMX_CR0_FIXED1:
3122         case MSR_IA32_VMX_CR4_FIXED1:
3123                 /*
3124                  * These MSRs are generated based on the vCPU's CPUID, so we
3125                  * do not support restoring them directly.
3126                  */
3127                 return -EINVAL;
3128         case MSR_IA32_VMX_EPT_VPID_CAP:
3129                 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
3130         case MSR_IA32_VMX_VMCS_ENUM:
3131                 vmx->nested.nested_vmx_vmcs_enum = data;
3132                 return 0;
3133         default:
3134                 /*
3135                  * The rest of the VMX capability MSRs do not support restore.
3136                  */
3137                 return -EINVAL;
3138         }
3139 }
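/*
 * Illustrative userspace sketch (not kernel code; vcpu_fd, error handling
 * and the saved value are assumed): these MSRs are restored through the
 * ordinary KVM_SET_MSRS ioctl on the vCPU file descriptor, e.g.
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entry;
 *	} msrs = {
 *		.hdr.nmsrs   = 1,
 *		.entry.index = MSR_IA32_VMX_BASIC,
 *		.entry.data  = saved_vmx_basic,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_MSRS, &msrs) != 1)
 *		err(1, "KVM_SET_MSRS");
 *
 * KVM_SET_MSRS returns the number of MSRs successfully written, so anything
 * other than 1 here means the write was rejected.
 */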
3140
3141 /* Returns 0 on success, non-0 otherwise. */
3142 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
3143 {
3144         struct vcpu_vmx *vmx = to_vmx(vcpu);
3145
3146         switch (msr_index) {
3147         case MSR_IA32_VMX_BASIC:
3148                 *pdata = vmx->nested.nested_vmx_basic;
3149                 break;
3150         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3151         case MSR_IA32_VMX_PINBASED_CTLS:
3152                 *pdata = vmx_control_msr(
3153                         vmx->nested.nested_vmx_pinbased_ctls_low,
3154                         vmx->nested.nested_vmx_pinbased_ctls_high);
3155                 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
3156                         *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3157                 break;
3158         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3159         case MSR_IA32_VMX_PROCBASED_CTLS:
3160                 *pdata = vmx_control_msr(
3161                         vmx->nested.nested_vmx_procbased_ctls_low,
3162                         vmx->nested.nested_vmx_procbased_ctls_high);
3163                 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
3164                         *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3165                 break;
3166         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3167         case MSR_IA32_VMX_EXIT_CTLS:
3168                 *pdata = vmx_control_msr(
3169                         vmx->nested.nested_vmx_exit_ctls_low,
3170                         vmx->nested.nested_vmx_exit_ctls_high);
3171                 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
3172                         *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3173                 break;
3174         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3175         case MSR_IA32_VMX_ENTRY_CTLS:
3176                 *pdata = vmx_control_msr(
3177                         vmx->nested.nested_vmx_entry_ctls_low,
3178                         vmx->nested.nested_vmx_entry_ctls_high);
3179                 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
3180                         *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3181                 break;
3182         case MSR_IA32_VMX_MISC:
3183                 *pdata = vmx_control_msr(
3184                         vmx->nested.nested_vmx_misc_low,
3185                         vmx->nested.nested_vmx_misc_high);
3186                 break;
3187         case MSR_IA32_VMX_CR0_FIXED0:
3188                 *pdata = vmx->nested.nested_vmx_cr0_fixed0;
3189                 break;
3190         case MSR_IA32_VMX_CR0_FIXED1:
3191                 *pdata = vmx->nested.nested_vmx_cr0_fixed1;
3192                 break;
3193         case MSR_IA32_VMX_CR4_FIXED0:
3194                 *pdata = vmx->nested.nested_vmx_cr4_fixed0;
3195                 break;
3196         case MSR_IA32_VMX_CR4_FIXED1:
3197                 *pdata = vmx->nested.nested_vmx_cr4_fixed1;
3198                 break;
3199         case MSR_IA32_VMX_VMCS_ENUM:
3200                 *pdata = vmx->nested.nested_vmx_vmcs_enum;
3201                 break;
3202         case MSR_IA32_VMX_PROCBASED_CTLS2:
3203                 *pdata = vmx_control_msr(
3204                         vmx->nested.nested_vmx_secondary_ctls_low,
3205                         vmx->nested.nested_vmx_secondary_ctls_high);
3206                 break;
3207         case MSR_IA32_VMX_EPT_VPID_CAP:
3208                 *pdata = vmx->nested.nested_vmx_ept_caps |
3209                         ((u64)vmx->nested.nested_vmx_vpid_caps << 32);
3210                 break;
3211         case MSR_IA32_VMX_VMFUNC:
3212                 *pdata = vmx->nested.nested_vmx_vmfunc_controls;
3213                 break;
3214         default:
3215                 return 1;
3216         }
3217
3218         return 0;
3219 }
3220
3221 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
3222                                                  uint64_t val)
3223 {
3224         uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
3225
3226         return !(val & ~valid_bits);
3227 }
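/*
 * Worked example (illustrative): if valid_bits is
 * FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_LMCE, then writing
 * FEATURE_CONTROL_LOCKED is accepted, while a value that also sets
 * FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX is rejected because that bit is
 * not in the valid mask.
 */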
3228
3229 /*
3230  * Reads an msr value (of 'msr_index') into 'pdata'.
3231  * Returns 0 on success, non-0 otherwise.
3232  * Assumes vcpu_load() was already called.
3233  */
3234 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3235 {
3236         struct vcpu_vmx *vmx = to_vmx(vcpu);
3237         struct shared_msr_entry *msr;
3238
3239         switch (msr_info->index) {
3240 #ifdef CONFIG_X86_64
3241         case MSR_FS_BASE:
3242                 msr_info->data = vmcs_readl(GUEST_FS_BASE);
3243                 break;
3244         case MSR_GS_BASE:
3245                 msr_info->data = vmcs_readl(GUEST_GS_BASE);
3246                 break;
3247         case MSR_KERNEL_GS_BASE:
3248                 vmx_load_host_state(vmx);
3249                 msr_info->data = vmx->msr_guest_kernel_gs_base;
3250                 break;
3251 #endif
3252         case MSR_EFER:
3253                 return kvm_get_msr_common(vcpu, msr_info);
3254         case MSR_IA32_TSC:
3255                 msr_info->data = guest_read_tsc(vcpu);
3256                 break;
3257         case MSR_IA32_SPEC_CTRL:
3258                 if (!msr_info->host_initiated &&
3259                     !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
3260                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
3261                         return 1;
3262
3263                 msr_info->data = to_vmx(vcpu)->spec_ctrl;
3264                 break;
3265         case MSR_IA32_ARCH_CAPABILITIES:
3266                 if (!msr_info->host_initiated &&
3267                     !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
3268                         return 1;
3269                 msr_info->data = to_vmx(vcpu)->arch_capabilities;
3270                 break;
3271         case MSR_IA32_SYSENTER_CS:
3272                 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
3273                 break;
3274         case MSR_IA32_SYSENTER_EIP:
3275                 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
3276                 break;
3277         case MSR_IA32_SYSENTER_ESP:
3278                 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
3279                 break;
3280         case MSR_IA32_BNDCFGS:
3281                 if (!kvm_mpx_supported() ||
3282                     (!msr_info->host_initiated &&
3283                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3284                         return 1;
3285                 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
3286                 break;
3287         case MSR_IA32_MCG_EXT_CTL:
3288                 if (!msr_info->host_initiated &&
3289                     !(vmx->msr_ia32_feature_control &
3290                       FEATURE_CONTROL_LMCE))
3291                         return 1;
3292                 msr_info->data = vcpu->arch.mcg_ext_ctl;
3293                 break;
3294         case MSR_IA32_FEATURE_CONTROL:
3295                 msr_info->data = vmx->msr_ia32_feature_control;
3296                 break;
3297         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3298                 if (!nested_vmx_allowed(vcpu))
3299                         return 1;
3300                 return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
3301         case MSR_IA32_XSS:
3302                 if (!vmx_xsaves_supported())
3303                         return 1;
3304                 msr_info->data = vcpu->arch.ia32_xss;
3305                 break;
3306         case MSR_TSC_AUX:
3307                 if (!msr_info->host_initiated &&
3308                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
3309                         return 1;
3310                 /* Otherwise falls through */
3311         default:
3312                 msr = find_msr_entry(vmx, msr_info->index);
3313                 if (msr) {
3314                         msr_info->data = msr->data;
3315                         break;
3316                 }
3317                 return kvm_get_msr_common(vcpu, msr_info);
3318         }
3319
3320         return 0;
3321 }
3322
3323 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
3324
3325 /*
3326  * Writes msr value into the appropriate "register".
3327  * Returns 0 on success, non-0 otherwise.
3328  * Assumes vcpu_load() was already called.
3329  */
3330 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3331 {
3332         struct vcpu_vmx *vmx = to_vmx(vcpu);
3333         struct shared_msr_entry *msr;
3334         int ret = 0;
3335         u32 msr_index = msr_info->index;
3336         u64 data = msr_info->data;
3337
3338         switch (msr_index) {
3339         case MSR_EFER:
3340                 ret = kvm_set_msr_common(vcpu, msr_info);
3341                 break;
3342 #ifdef CONFIG_X86_64
3343         case MSR_FS_BASE:
3344                 vmx_segment_cache_clear(vmx);
3345                 vmcs_writel(GUEST_FS_BASE, data);
3346                 break;
3347         case MSR_GS_BASE:
3348                 vmx_segment_cache_clear(vmx);
3349                 vmcs_writel(GUEST_GS_BASE, data);
3350                 break;
3351         case MSR_KERNEL_GS_BASE:
3352                 vmx_load_host_state(vmx);
3353                 vmx->msr_guest_kernel_gs_base = data;
3354                 break;
3355 #endif
3356         case MSR_IA32_SYSENTER_CS:
3357                 vmcs_write32(GUEST_SYSENTER_CS, data);
3358                 break;
3359         case MSR_IA32_SYSENTER_EIP:
3360                 vmcs_writel(GUEST_SYSENTER_EIP, data);
3361                 break;
3362         case MSR_IA32_SYSENTER_ESP:
3363                 vmcs_writel(GUEST_SYSENTER_ESP, data);
3364                 break;
3365         case MSR_IA32_BNDCFGS:
3366                 if (!kvm_mpx_supported() ||
3367                     (!msr_info->host_initiated &&
3368                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3369                         return 1;
3370                 if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
3371                     (data & MSR_IA32_BNDCFGS_RSVD))
3372                         return 1;
3373                 vmcs_write64(GUEST_BNDCFGS, data);
3374                 break;
3375         case MSR_IA32_TSC:
3376                 kvm_write_tsc(vcpu, msr_info);
3377                 break;
3378         case MSR_IA32_SPEC_CTRL:
3379                 if (!msr_info->host_initiated &&
3380                     !guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
3381                     !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
3382                         return 1;
3383
3384                 /* The STIBP bit doesn't fault even if it's not advertised */
3385                 if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
3386                         return 1;
3387
3388                 vmx->spec_ctrl = data;
3389
3390                 if (!data)
3391                         break;
3392
3393                 /*
3394                  * For non-nested:
3395                  * When it's written (to non-zero) for the first time, pass
3396                  * it through.
3397                  *
3398                  * For nested:
3399                  * The handling of the MSR bitmap for L2 guests is done in
3400                  * nested_vmx_merge_msr_bitmap. We should not touch the
3401                  * vmcs02.msr_bitmap here since it gets completely overwritten
3402                  * in the merging. We update the vmcs01 here for L1 as well
3403                  * since it will end up touching the MSR anyway now.