/* Source: arch/x86/kvm/vmx.c at commit bb5b4888505bdccc4a505aa1adcde95d9edd62f8 (sfrench/cifs-2.6.git) */
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22 #include "lapic.h"
23
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include <linux/frame.h>
37 #include "kvm_cache_regs.h"
38 #include "x86.h"
39
40 #include <asm/cpu.h>
41 #include <asm/io.h>
42 #include <asm/desc.h>
43 #include <asm/vmx.h>
44 #include <asm/virtext.h>
45 #include <asm/mce.h>
46 #include <asm/fpu/internal.h>
47 #include <asm/perf_event.h>
48 #include <asm/debugreg.h>
49 #include <asm/kexec.h>
50 #include <asm/apic.h>
51 #include <asm/irq_remapping.h>
52 #include <asm/mmu_context.h>
53 #include <asm/nospec-branch.h>
54
55 #include "trace.h"
56 #include "pmu.h"
57
/*
 * Wrappers that tag a VMX instruction site so a fault raised during an
 * emergency reboot is fixed up rather than crashing; __ex_clear()
 * presumably also zeroes "reg" in the fixup path (note the xor) —
 * TODO confirm against ____kvm_handle_fault_on_reboot's definition.
 */
58 #define __ex(x) __kvm_handle_fault_on_reboot(x)
59 #define __ex_clear(x, reg) \
60         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
61
62 MODULE_AUTHOR("Qumranet");
63 MODULE_LICENSE("GPL");
64
/* Auto-load this module on any CPU advertising the VMX feature flag. */
65 static const struct x86_cpu_id vmx_cpu_id[] = {
66         X86_FEATURE_MATCH(X86_FEATURE_VMX),
67         {}
68 };
69 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
70
/*
 * Module parameters. Hardware-assist features default to enabled and are
 * exposed read-only (0444 == S_IRUGO) under /sys/module/.../parameters/.
 */
71 static bool __read_mostly enable_vpid = 1;
72 module_param_named(vpid, enable_vpid, bool, 0444);
73
74 static bool __read_mostly enable_vnmi = 1;
75 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
76
77 static bool __read_mostly flexpriority_enabled = 1;
78 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
79
80 static bool __read_mostly enable_ept = 1;
81 module_param_named(ept, enable_ept, bool, S_IRUGO);
82
83 static bool __read_mostly enable_unrestricted_guest = 1;
84 module_param_named(unrestricted_guest,
85                         enable_unrestricted_guest, bool, S_IRUGO);
86
87 static bool __read_mostly enable_ept_ad_bits = 1;
88 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
89
90 static bool __read_mostly emulate_invalid_guest_state = true;
91 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
92
93 static bool __read_mostly fasteoi = 1;
94 module_param(fasteoi, bool, S_IRUGO);
95
96 static bool __read_mostly enable_apicv = 1;
97 module_param(enable_apicv, bool, S_IRUGO);
98
99 static bool __read_mostly enable_shadow_vmcs = 1;
100 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
101 /*
102  * If nested=1, nested virtualization is supported, i.e., guests may use
103  * VMX and be a hypervisor for its own guests. If nested=0, guests may not
104  * use VMX instructions.
105  */
106 static bool __read_mostly nested = 0;
107 module_param(nested, bool, S_IRUGO);
108
109 static u64 __read_mostly host_xss;
110
111 static bool __read_mostly enable_pml = 1;
112 module_param_named(pml, enable_pml, bool, S_IRUGO);
113
114 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
115
116 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
117 static int __read_mostly cpu_preemption_timer_multi;
118 static bool __read_mostly enable_preemption_timer = 1;
119 #ifdef CONFIG_X86_64
/* The preemption-timer knob is only user-visible on 64-bit builds. */
120 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
121 #endif
122
123 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
124 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
125 #define KVM_VM_CR0_ALWAYS_ON                                            \
126         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
/* CR4 bits the guest may own (read/write without a VM exit). */
127 #define KVM_CR4_GUEST_OWNED_BITS                                      \
128         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
129          | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
130
131 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
132 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
133
134 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
135
136 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
137
138 /*
139  * Hyper-V requires all of these, so mark them as supported even though
140  * they are just treated the same as all-context.
141  */
142 #define VMX_VPID_EXTENT_SUPPORTED_MASK          \
143         (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
144         VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
145         VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
146         VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
147
148 /*
149  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
150  * ple_gap:    upper bound on the amount of time between two successive
151  *             executions of PAUSE in a loop. Also indicate if ple enabled.
152  *             According to test, this time is usually smaller than 128 cycles.
153  * ple_window: upper bound on the amount of time a guest is allowed to execute
154  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
155  *             less than 2^12 cycles
156  * Time is measured based on a counter that runs at the same rate as the TSC,
157  * refer SDM volume 3b section 21.6.13 & 22.1.3.
158  */
159 #define KVM_VMX_DEFAULT_PLE_GAP           128
160 #define KVM_VMX_DEFAULT_PLE_WINDOW        4096
161 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW   2
162 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0
/* Cap chosen so that window * GROW can never overflow an int. */
163 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX    \
164                 INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW
165
166 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
167 module_param(ple_gap, int, S_IRUGO);
168
169 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
170 module_param(ple_window, int, S_IRUGO);
171
172 /* Default doubles per-vcpu window every exit. */
173 static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
174 module_param(ple_window_grow, int, S_IRUGO);
175
176 /* Default resets per-vcpu window every exit to ple_window. */
177 static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
178 module_param(ple_window_shrink, int, S_IRUGO);
179
180 /* Default is to compute the maximum so we can never overflow. */
181 static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
182 static int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
183 module_param(ple_window_max, int, S_IRUGO);
184
/* Label in the vmenter assembly that VM exits return to. */
185 extern const ulong vmx_return;
186
/* Max MSRs in each of the VM-entry/VM-exit autoload lists below. */
187 #define NR_AUTOLOAD_MSRS 8
188
189 struct vmcs {
190         u32 revision_id;
191         u32 abort;
192         char data[0];
193 };
194
195 /*
196  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
197  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
198  * loaded on this CPU (so we can clear them if the CPU goes down).
199  */
200 struct loaded_vmcs {
201         struct vmcs *vmcs;
202         struct vmcs *shadow_vmcs;
203         int cpu; /* CPU this VMCS is loaded on, or -1 if none */
204         bool launched; /* true once VMLAUNCHed; use VMRESUME afterwards */
205         bool nmi_known_unmasked;
206         unsigned long vmcs_host_cr3;    /* May not match real cr3 */
207         unsigned long vmcs_host_cr4;    /* May not match real cr4 */
208         /* Support for vnmi-less CPUs */
209         int soft_vnmi_blocked;
210         ktime_t entry_time;
211         s64 vnmi_blocked_time;
212         struct list_head loaded_vmcss_on_cpu_link; /* per-CPU list, cleared on CPU offline */
213 };
214
/*
 * One entry in the guest MSR cache (vcpu_vmx.guest_msrs): the MSR index,
 * its guest value, and a mask — presumably of bits the guest may control;
 * TODO confirm mask semantics against the code that consumes it.
 */
215 struct shared_msr_entry {
216         unsigned index;
217         u64 data;
218         u64 mask;
219 };
220
221 /*
222  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
223  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
224  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
225  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
226  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
227  * More than one of these structures may exist, if L1 runs multiple L2 guests.
228  * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
229  * underlying hardware which will be used to run L2.
230  * This structure is packed to ensure that its layout is identical across
231  * machines (necessary for live migration).
232  * If there are changes in this struct, VMCS12_REVISION must be changed.
233  */
234 typedef u64 natural_width;
235 struct __packed vmcs12 {
236         /* According to the Intel spec, a VMCS region must start with the
237          * following two fields. Then follow implementation-specific data.
238          */
239         u32 revision_id;
240         u32 abort;
241
242         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
243         u32 padding[7]; /* room for future expansion */
244
        /* 64-bit fields; FIELD64() below also maps each one's _HIGH half. */
245         u64 io_bitmap_a;
246         u64 io_bitmap_b;
247         u64 msr_bitmap;
248         u64 vm_exit_msr_store_addr;
249         u64 vm_exit_msr_load_addr;
250         u64 vm_entry_msr_load_addr;
251         u64 tsc_offset;
252         u64 virtual_apic_page_addr;
253         u64 apic_access_addr;
254         u64 posted_intr_desc_addr;
255         u64 vm_function_control;
256         u64 ept_pointer;
257         u64 eoi_exit_bitmap0;
258         u64 eoi_exit_bitmap1;
259         u64 eoi_exit_bitmap2;
260         u64 eoi_exit_bitmap3;
261         u64 eptp_list_address;
262         u64 xss_exit_bitmap;
263         u64 guest_physical_address;
264         u64 vmcs_link_pointer;
265         u64 pml_address;
266         u64 guest_ia32_debugctl;
267         u64 guest_ia32_pat;
268         u64 guest_ia32_efer;
269         u64 guest_ia32_perf_global_ctrl;
270         u64 guest_pdptr0;
271         u64 guest_pdptr1;
272         u64 guest_pdptr2;
273         u64 guest_pdptr3;
274         u64 guest_bndcfgs;
275         u64 host_ia32_pat;
276         u64 host_ia32_efer;
277         u64 host_ia32_perf_global_ctrl;
278         u64 padding64[8]; /* room for future expansion */
279         /*
280          * To allow migration of L1 (complete with its L2 guests) between
281          * machines of different natural widths (32 or 64 bit), we cannot have
282          * unsigned long fields with no explict size. We use u64 (aliased
283          * natural_width) instead. Luckily, x86 is little-endian.
284          */
285         natural_width cr0_guest_host_mask;
286         natural_width cr4_guest_host_mask;
287         natural_width cr0_read_shadow;
288         natural_width cr4_read_shadow;
289         natural_width cr3_target_value0;
290         natural_width cr3_target_value1;
291         natural_width cr3_target_value2;
292         natural_width cr3_target_value3;
293         natural_width exit_qualification;
294         natural_width guest_linear_address;
295         natural_width guest_cr0;
296         natural_width guest_cr3;
297         natural_width guest_cr4;
298         natural_width guest_es_base;
299         natural_width guest_cs_base;
300         natural_width guest_ss_base;
301         natural_width guest_ds_base;
302         natural_width guest_fs_base;
303         natural_width guest_gs_base;
304         natural_width guest_ldtr_base;
305         natural_width guest_tr_base;
306         natural_width guest_gdtr_base;
307         natural_width guest_idtr_base;
308         natural_width guest_dr7;
309         natural_width guest_rsp;
310         natural_width guest_rip;
311         natural_width guest_rflags;
312         natural_width guest_pending_dbg_exceptions;
313         natural_width guest_sysenter_esp;
314         natural_width guest_sysenter_eip;
315         natural_width host_cr0;
316         natural_width host_cr3;
317         natural_width host_cr4;
318         natural_width host_fs_base;
319         natural_width host_gs_base;
320         natural_width host_tr_base;
321         natural_width host_gdtr_base;
322         natural_width host_idtr_base;
323         natural_width host_ia32_sysenter_esp;
324         natural_width host_ia32_sysenter_eip;
325         natural_width host_rsp;
326         natural_width host_rip;
327         natural_width paddingl[8]; /* room for future expansion */
        /* 32-bit fields. */
328         u32 pin_based_vm_exec_control;
329         u32 cpu_based_vm_exec_control;
330         u32 exception_bitmap;
331         u32 page_fault_error_code_mask;
332         u32 page_fault_error_code_match;
333         u32 cr3_target_count;
334         u32 vm_exit_controls;
335         u32 vm_exit_msr_store_count;
336         u32 vm_exit_msr_load_count;
337         u32 vm_entry_controls;
338         u32 vm_entry_msr_load_count;
339         u32 vm_entry_intr_info_field;
340         u32 vm_entry_exception_error_code;
341         u32 vm_entry_instruction_len;
342         u32 tpr_threshold;
343         u32 secondary_vm_exec_control;
344         u32 vm_instruction_error;
345         u32 vm_exit_reason;
346         u32 vm_exit_intr_info;
347         u32 vm_exit_intr_error_code;
348         u32 idt_vectoring_info_field;
349         u32 idt_vectoring_error_code;
350         u32 vm_exit_instruction_len;
351         u32 vmx_instruction_info;
352         u32 guest_es_limit;
353         u32 guest_cs_limit;
354         u32 guest_ss_limit;
355         u32 guest_ds_limit;
356         u32 guest_fs_limit;
357         u32 guest_gs_limit;
358         u32 guest_ldtr_limit;
359         u32 guest_tr_limit;
360         u32 guest_gdtr_limit;
361         u32 guest_idtr_limit;
362         u32 guest_es_ar_bytes;
363         u32 guest_cs_ar_bytes;
364         u32 guest_ss_ar_bytes;
365         u32 guest_ds_ar_bytes;
366         u32 guest_fs_ar_bytes;
367         u32 guest_gs_ar_bytes;
368         u32 guest_ldtr_ar_bytes;
369         u32 guest_tr_ar_bytes;
370         u32 guest_interruptibility_info;
371         u32 guest_activity_state;
372         u32 guest_sysenter_cs;
373         u32 host_ia32_sysenter_cs;
374         u32 vmx_preemption_timer_value;
375         u32 padding32[7]; /* room for future expansion */
        /* 16-bit fields. */
376         u16 virtual_processor_id;
377         u16 posted_intr_nv;
378         u16 guest_es_selector;
379         u16 guest_cs_selector;
380         u16 guest_ss_selector;
381         u16 guest_ds_selector;
382         u16 guest_fs_selector;
383         u16 guest_gs_selector;
384         u16 guest_ldtr_selector;
385         u16 guest_tr_selector;
386         u16 guest_intr_status;
387         u16 guest_pml_index;
388         u16 host_es_selector;
389         u16 host_cs_selector;
390         u16 host_ss_selector;
391         u16 host_ds_selector;
392         u16 host_fs_selector;
393         u16 host_gs_selector;
394         u16 host_tr_selector;
395 };
396
397 /*
398  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
399  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
400  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
401  */
402 #define VMCS12_REVISION 0x11e57ed0
403
404 /*
405  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
406  * and any VMCS region. Although only sizeof(struct vmcs12) are used by the
407  * current implementation, 4K are reserved to avoid future complications.
408  */
409 #define VMCS12_SIZE 0x1000
410
411 /*
412  * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
413  * supported VMCS12 field encoding.
414  */
415 #define VMCS12_MAX_FIELD_INDEX 0x17
416
417 /*
418  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
419  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
420  */
421 struct nested_vmx {
422         /* Has the level1 guest done vmxon? */
423         bool vmxon;
424         gpa_t vmxon_ptr;
425         bool pml_full;
426
427         /* The guest-physical address of the current VMCS L1 keeps for L2 */
428         gpa_t current_vmptr;
429         /*
430          * Cache of the guest's VMCS, existing outside of guest memory.
431          * Loaded from guest memory during VMPTRLD. Flushed to guest
432          * memory during VMCLEAR and VMPTRLD.
433          */
434         struct vmcs12 *cached_vmcs12;
435         /*
436          * Indicates if the shadow vmcs must be updated with the
437          * data hold by vmcs12
438          */
439         bool sync_shadow_vmcs;
440         bool dirty_vmcs12;
441
442         bool change_vmcs01_virtual_x2apic_mode;
443         /* L2 must run next, and mustn't decide to exit to L1. */
444         bool nested_run_pending;
445
        /* The hardware VMCS used to actually run L2 (built from cached_vmcs12). */
446         struct loaded_vmcs vmcs02;
447
448         /*
449          * Guest pages referred to in the vmcs02 with host-physical
450          * pointers, so we must keep them pinned while L2 runs.
451          */
452         struct page *apic_access_page;
453         struct page *virtual_apic_page;
454         struct page *pi_desc_page;
455         struct pi_desc *pi_desc;
456         bool pi_pending;
457         u16 posted_intr_nv;
458
459         unsigned long *msr_bitmap;
460
        /* Emulates the VMX-preemption timer for L1 — presumably fires an exit; confirm in handler. */
461         struct hrtimer preemption_timer;
462         bool preemption_timer_expired;
463
464         /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
465         u64 vmcs01_debugctl;
466
467         u16 vpid02;
468         u16 last_vpid;
469
470         /*
471          * We only store the "true" versions of the VMX capability MSRs. We
472          * generate the "non-true" versions by setting the must-be-1 bits
473          * according to the SDM.
474          */
475         u32 nested_vmx_procbased_ctls_low;
476         u32 nested_vmx_procbased_ctls_high;
477         u32 nested_vmx_secondary_ctls_low;
478         u32 nested_vmx_secondary_ctls_high;
479         u32 nested_vmx_pinbased_ctls_low;
480         u32 nested_vmx_pinbased_ctls_high;
481         u32 nested_vmx_exit_ctls_low;
482         u32 nested_vmx_exit_ctls_high;
483         u32 nested_vmx_entry_ctls_low;
484         u32 nested_vmx_entry_ctls_high;
485         u32 nested_vmx_misc_low;
486         u32 nested_vmx_misc_high;
487         u32 nested_vmx_ept_caps;
488         u32 nested_vmx_vpid_caps;
489         u64 nested_vmx_basic;
490         u64 nested_vmx_cr0_fixed0;
491         u64 nested_vmx_cr0_fixed1;
492         u64 nested_vmx_cr4_fixed0;
493         u64 nested_vmx_cr4_fixed1;
494         u64 nested_vmx_vmcs_enum;
495         u64 nested_vmx_vmfunc_controls;
496
497         /* SMM related state */
498         struct {
499                 /* in VMX operation on SMM entry? */
500                 bool vmxon;
501                 /* in guest mode on SMM entry? */
502                 bool guest_mode;
503         } smm;
504 };
505
/* Bit numbers of ON and SN within pi_desc.control (the low u64 of the descriptor). */
506 #define POSTED_INTR_ON  0
507 #define POSTED_INTR_SN  1
508
509 /* Posted-Interrupt Descriptor */
/*
 * Layout is fixed by hardware (64-byte aligned); the anonymous union lets
 * software manipulate ON/SN atomically through the 64-bit "control" word.
 */
510 struct pi_desc {
511         u32 pir[8];     /* Posted interrupt requested */
512         union {
513                 struct {
514                                 /* bit 256 - Outstanding Notification */
515                         u16     on      : 1,
516                                 /* bit 257 - Suppress Notification */
517                                 sn      : 1,
518                                 /* bit 271:258 - Reserved */
519                                 rsvd_1  : 14;
520                                 /* bit 279:272 - Notification Vector */
521                         u8      nv;
522                                 /* bit 287:280 - Reserved */
523                         u8      rsvd_2;
524                                 /* bit 319:288 - Notification Destination */
525                         u32     ndst;
526                 };
527                 u64 control;
528         };
529         u32 rsvd[6];
530 } __aligned(64);
531
532 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
533 {
534         return test_and_set_bit(POSTED_INTR_ON,
535                         (unsigned long *)&pi_desc->control);
536 }
537
538 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
539 {
540         return test_and_clear_bit(POSTED_INTR_ON,
541                         (unsigned long *)&pi_desc->control);
542 }
543
544 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
545 {
546         return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
547 }
548
549 static inline void pi_clear_sn(struct pi_desc *pi_desc)
550 {
551         return clear_bit(POSTED_INTR_SN,
552                         (unsigned long *)&pi_desc->control);
553 }
554
555 static inline void pi_set_sn(struct pi_desc *pi_desc)
556 {
557         return set_bit(POSTED_INTR_SN,
558                         (unsigned long *)&pi_desc->control);
559 }
560
561 static inline void pi_clear_on(struct pi_desc *pi_desc)
562 {
563         clear_bit(POSTED_INTR_ON,
564                   (unsigned long *)&pi_desc->control);
565 }
566
567 static inline int pi_test_on(struct pi_desc *pi_desc)
568 {
569         return test_bit(POSTED_INTR_ON,
570                         (unsigned long *)&pi_desc->control);
571 }
572
573 static inline int pi_test_sn(struct pi_desc *pi_desc)
574 {
575         return test_bit(POSTED_INTR_SN,
576                         (unsigned long *)&pi_desc->control);
577 }
578
/*
 * Per-vCPU VMX state. The generic kvm_vcpu is embedded as the first
 * member so to_vmx() can recover the container via container_of().
 */
579 struct vcpu_vmx {
580         struct kvm_vcpu       vcpu;
581         unsigned long         host_rsp;
582         u8                    fail;
583         u32                   exit_intr_info;
584         u32                   idt_vectoring_info;
585         ulong                 rflags;
586         struct shared_msr_entry *guest_msrs;
587         int                   nmsrs;
588         int                   save_nmsrs;
589         unsigned long         host_idt_base;
590 #ifdef CONFIG_X86_64
591         u64                   msr_host_kernel_gs_base;
592         u64                   msr_guest_kernel_gs_base;
593 #endif
594         u32 vm_entry_controls_shadow;
595         u32 vm_exit_controls_shadow;
596         u32 secondary_exec_control;
597
598         /*
599          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
600          * non-nested (L1) guest, it always points to vmcs01. For a nested
601          * guest (L2), it points to a different VMCS.
602          */
603         struct loaded_vmcs    vmcs01;
604         struct loaded_vmcs   *loaded_vmcs;
605         bool                  __launched; /* temporary, used in vmx_vcpu_run */
        /* MSRs the CPU loads/saves automatically on VM entry/exit. */
606         struct msr_autoload {
607                 unsigned nr;
608                 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
609                 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
610         } msr_autoload;
        /* Host segment/MSR state saved around guest runs. */
611         struct {
612                 int           loaded;
613                 u16           fs_sel, gs_sel, ldt_sel;
614 #ifdef CONFIG_X86_64
615                 u16           ds_sel, es_sel;
616 #endif
617                 int           gs_ldt_reload_needed;
618                 int           fs_reload_needed;
619                 u64           msr_host_bndcfgs;
620         } host_state;
        /* State for emulating real mode via vm86. */
621         struct {
622                 int vm86_active;
623                 ulong save_rflags;
624                 struct kvm_segment segs[8];
625         } rmode;
626         struct {
627                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
628                 struct kvm_save_segment {
629                         u16 selector;
630                         unsigned long base;
631                         u32 limit;
632                         u32 ar;
633                 } seg[8];
634         } segment_cache;
635         int vpid;
636         bool emulation_required;
637
638         u32 exit_reason;
639
640         /* Posted interrupt descriptor */
641         struct pi_desc pi_desc;
642
643         /* Support for a guest hypervisor (nested VMX) */
644         struct nested_vmx nested;
645
646         /* Dynamic PLE window. */
647         int ple_window;
648         bool ple_window_dirty;
649
650         /* Support for PML */
651 #define PML_ENTITY_NUM          512
652         struct page *pml_pg;
653
654         /* apic deadline value in host tsc */
655         u64 hv_deadline_tsc;
656
657         u64 current_tsc_ratio;
658
659         u32 host_pkru;
660
661         unsigned long host_debugctlmsr;
662
663         /*
664          * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
665          * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
666          * in msr_ia32_feature_control_valid_bits.
667          */
668         u64 msr_ia32_feature_control;
669         u64 msr_ia32_feature_control_valid_bits;
670 };
671
/*
 * Per-field indices into vcpu_vmx.segment_cache: one validity bit per
 * field per segment in segment_cache.bitmask, matching the members of
 * struct kvm_save_segment (selector, base, limit, ar).
 */
672 enum segment_cache_field {
673         SEG_FIELD_SEL = 0,
674         SEG_FIELD_BASE = 1,
675         SEG_FIELD_LIMIT = 2,
676         SEG_FIELD_AR = 3,
677
678         SEG_FIELD_NR = 4
679 };
680
681 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
682 {
683         return container_of(vcpu, struct vcpu_vmx, vcpu);
684 }
685
686 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
687 {
688         return &(to_vmx(vcpu)->pi_desc);
689 }
690
/*
 * Map a VMCS field encoding to its offset inside struct vmcs12.
 * ROL16 rotates the 16-bit encoding left by 6 — presumably to compress
 * the sparse encodings into small array indices for the offset table
 * below; TODO confirm against VMCS12_MAX_FIELD_INDEX usage.
 * FIELD64 additionally maps the field's _HIGH alias to the upper 32 bits.
 */
691 #define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
692 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
693 #define FIELD(number, name)     [ROL16(number, 6)] = VMCS12_OFFSET(name)
694 #define FIELD64(number, name)                                           \
695         FIELD(number, name),                                            \
696         [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
697
698
/*
 * Shadow-VMCS field lists, generated x-macro style: vmx_shadow_fields.h
 * expands SHADOW_FIELD_RO()/SHADOW_FIELD_RW() entries into the two
 * arrays of field encodings below.
 */
699 static u16 shadow_read_only_fields[] = {
700 #define SHADOW_FIELD_RO(x) x,
701 #include "vmx_shadow_fields.h"
702 };
703 static int max_shadow_read_only_fields =
704         ARRAY_SIZE(shadow_read_only_fields);
705
706 static u16 shadow_read_write_fields[] = {
707 #define SHADOW_FIELD_RW(x) x,
708 #include "vmx_shadow_fields.h"
709 };
710 static int max_shadow_read_write_fields =
711         ARRAY_SIZE(shadow_read_write_fields);
712
713 static const unsigned short vmcs_field_to_offset_table[] = {
714         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
715         FIELD(POSTED_INTR_NV, posted_intr_nv),
716         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
717         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
718         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
719         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
720         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
721         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
722         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
723         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
724         FIELD(GUEST_INTR_STATUS, guest_intr_status),
725         FIELD(GUEST_PML_INDEX, guest_pml_index),
726         FIELD(HOST_ES_SELECTOR, host_es_selector),
727         FIELD(HOST_CS_SELECTOR, host_cs_selector),
728         FIELD(HOST_SS_SELECTOR, host_ss_selector),
729         FIELD(HOST_DS_SELECTOR, host_ds_selector),
730         FIELD(HOST_FS_SELECTOR, host_fs_selector),
731         FIELD(HOST_GS_SELECTOR, host_gs_selector),
732         FIELD(HOST_TR_SELECTOR, host_tr_selector),
733         FIELD64(IO_BITMAP_A, io_bitmap_a),
734         FIELD64(IO_BITMAP_B, io_bitmap_b),
735         FIELD64(MSR_BITMAP, msr_bitmap),
736         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
737         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
738         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
739         FIELD64(TSC_OFFSET, tsc_offset),
740         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
741         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
742         FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
743         FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
744         FIELD64(EPT_POINTER, ept_pointer),
745         FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
746         FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
747         FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
748         FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
749         FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
750         FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
751         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
752         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
753         FIELD64(PML_ADDRESS, pml_address),
754         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
755         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
756         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
757         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
758         FIELD64(GUEST_PDPTR0, guest_pdptr0),
759         FIELD64(GUEST_PDPTR1, guest_pdptr1),
760         FIELD64(GUEST_PDPTR2, guest_pdptr2),
761         FIELD64(GUEST_PDPTR3, guest_pdptr3),
762         FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
763         FIELD64(HOST_IA32_PAT, host_ia32_pat),
764         FIELD64(HOST_IA32_EFER, host_ia32_efer),
765         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
766         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
767         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
768         FIELD(EXCEPTION_BITMAP, exception_bitmap),
769         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
770         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
771         FIELD(CR3_TARGET_COUNT, cr3_target_count),
772         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
773         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
774         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
775         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
776         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
777         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
778         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
779         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
780         FIELD(TPR_THRESHOLD, tpr_threshold),
781         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
782         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
783         FIELD(VM_EXIT_REASON, vm_exit_reason),
784         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
785         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
786         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
787         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
788         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
789         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
790         FIELD(GUEST_ES_LIMIT, guest_es_limit),
791         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
792         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
793         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
794         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
795         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
796         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
797         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
798         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
799         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
800         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
801         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
802         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
803         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
804         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
805         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
806         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
807         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
808         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
809         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
810         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
811         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
812         FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
813         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
814         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
815         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
816         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
817         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
818         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
819         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
820         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
821         FIELD(EXIT_QUALIFICATION, exit_qualification),
822         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
823         FIELD(GUEST_CR0, guest_cr0),
824         FIELD(GUEST_CR3, guest_cr3),
825         FIELD(GUEST_CR4, guest_cr4),
826         FIELD(GUEST_ES_BASE, guest_es_base),
827         FIELD(GUEST_CS_BASE, guest_cs_base),
828         FIELD(GUEST_SS_BASE, guest_ss_base),
829         FIELD(GUEST_DS_BASE, guest_ds_base),
830         FIELD(GUEST_FS_BASE, guest_fs_base),
831         FIELD(GUEST_GS_BASE, guest_gs_base),
832         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
833         FIELD(GUEST_TR_BASE, guest_tr_base),
834         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
835         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
836         FIELD(GUEST_DR7, guest_dr7),
837         FIELD(GUEST_RSP, guest_rsp),
838         FIELD(GUEST_RIP, guest_rip),
839         FIELD(GUEST_RFLAGS, guest_rflags),
840         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
841         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
842         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
843         FIELD(HOST_CR0, host_cr0),
844         FIELD(HOST_CR3, host_cr3),
845         FIELD(HOST_CR4, host_cr4),
846         FIELD(HOST_FS_BASE, host_fs_base),
847         FIELD(HOST_GS_BASE, host_gs_base),
848         FIELD(HOST_TR_BASE, host_tr_base),
849         FIELD(HOST_GDTR_BASE, host_gdtr_base),
850         FIELD(HOST_IDTR_BASE, host_idtr_base),
851         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
852         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
853         FIELD(HOST_RSP, host_rsp),
854         FIELD(HOST_RIP, host_rip),
855 };
856
857 static inline short vmcs_field_to_offset(unsigned long field)
858 {
859         unsigned index;
860
861         if (field >> 15)
862                 return -ENOENT;
863
864         index = ROL16(field, 6);
865         if (index >= ARRAY_SIZE(vmcs_field_to_offset_table))
866                 return -ENOENT;
867
868         /*
869          * FIXME: Mitigation for CVE-2017-5753.  To be replaced with a
870          * generic mechanism.
871          */
872         asm("lfence");
873
874         if (vmcs_field_to_offset_table[index] == 0)
875                 return -ENOENT;
876
877         return vmcs_field_to_offset_table[index];
878 }
879
880 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
881 {
882         return to_vmx(vcpu)->nested.cached_vmcs12;
883 }
884
/* Forward declarations for helpers defined later in this file. */
static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
static bool vmx_xsaves_supported(void);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg);
static bool guest_state_valid(struct kvm_vcpu *vcpu);
static u32 vmx_segment_access_rights(struct kvm_segment *var);
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code);
900
/* Per-CPU VMXON region and the VMCS currently loaded on each CPU. */
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);

/*
 * We maintain a per-CPU linked-list of vCPU, so in wakeup_handler() we
 * can find which vCPU should be woken up.
 */
static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
915
/* Indices into vmx_bitmap[] for the various MSR/VMREAD/VMWRITE bitmaps. */
enum {
	VMX_MSR_BITMAP_LEGACY,
	VMX_MSR_BITMAP_LONGMODE,
	VMX_MSR_BITMAP_LEGACY_X2APIC_APICV,
	VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV,
	VMX_MSR_BITMAP_LEGACY_X2APIC,
	VMX_MSR_BITMAP_LONGMODE_X2APIC,
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};

static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

/* Named accessors for the individual entries of vmx_bitmap[]. */
#define vmx_msr_bitmap_legacy                (vmx_bitmap[VMX_MSR_BITMAP_LEGACY])
#define vmx_msr_bitmap_longmode              (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE])
#define vmx_msr_bitmap_legacy_x2apic_apicv   (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC_APICV])
#define vmx_msr_bitmap_longmode_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV])
#define vmx_msr_bitmap_legacy_x2apic         (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC])
#define vmx_msr_bitmap_longmode_x2apic       (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC])
#define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
938
/* Host capability flags, populated elsewhere in this file. */
static bool cpu_has_load_ia32_efer;
static bool cpu_has_load_perf_global_ctrl;

/* VPID allocator state; the bitmap is protected by vmx_vpid_lock. */
static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);
944
/*
 * Global snapshot of this host's VMCS configuration; the cpu_has_vmx_*()
 * helpers below test bits of these control fields.
 */
static struct vmcs_config {
	int size;
	int order;
	u32 basic_cap;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

/* EPT and VPID capability bits, tested by the helpers below. */
static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;
961
/* Map a VCPU_SREG_* index to the VMCS guest-state fields of that segment. */
#define VMX_SEGMENT_FIELD(seg)                                  \
	[VCPU_SREG_##seg] = {                                   \
		.selector = GUEST_##seg##_SELECTOR,             \
		.base = GUEST_##seg##_BASE,                     \
		.limit = GUEST_##seg##_LIMIT,                   \
		.ar_bytes = GUEST_##seg##_AR_BYTES,             \
	}

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};
985
/* Host value of IA32_EFER. */
static u64 host_efer;

static void ept_save_pdptrs(struct kvm_vcpu *vcpu);

/*
 * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};
1000
1001 static inline bool is_exception_n(u32 intr_info, u8 vector)
1002 {
1003         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1004                              INTR_INFO_VALID_MASK)) ==
1005                 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1006 }
1007
/* #DB: debug exception. */
static inline bool is_debug(u32 intr_info)
{
	return is_exception_n(intr_info, DB_VECTOR);
}

/* #BP: breakpoint exception. */
static inline bool is_breakpoint(u32 intr_info)
{
	return is_exception_n(intr_info, BP_VECTOR);
}

/* #PF: page fault. */
static inline bool is_page_fault(u32 intr_info)
{
	return is_exception_n(intr_info, PF_VECTOR);
}

/* #NM: device not available. */
static inline bool is_no_device(u32 intr_info)
{
	return is_exception_n(intr_info, NM_VECTOR);
}

/* #UD: invalid opcode. */
static inline bool is_invalid_opcode(u32 intr_info)
{
	return is_exception_n(intr_info, UD_VECTOR);
}

/* Valid external interrupt (any vector). */
static inline bool is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
1038
1039 static inline bool is_machine_check(u32 intr_info)
1040 {
1041         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1042                              INTR_INFO_VALID_MASK)) ==
1043                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1044 }
1045
/* Tests of the execution-control bits captured in vmcs_config. */
static inline bool cpu_has_vmx_msr_bitmap(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}

static inline bool cpu_has_vmx_tpr_shadow(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}

/* TPR shadowing is only used when the LAPIC is emulated in the kernel. */
static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
{
	return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
}

static inline bool cpu_has_secondary_exec_ctrls(void)
{
	return vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}

static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
}

static inline bool cpu_has_vmx_apic_register_virt(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_APIC_REGISTER_VIRT;
}

static inline bool cpu_has_vmx_virtual_intr_delivery(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
}
1090
1091 /*
1092  * Comment's format: document - errata name - stepping - processor name.
1093  * Refer from
1094  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1095  */
1096 static u32 vmx_preemption_cpu_tfms[] = {
1097 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
1098 0x000206E6,
1099 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
1100 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1101 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1102 0x00020652,
1103 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1104 0x00020655,
1105 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
1106 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
1107 /*
1108  * 320767.pdf - AAP86  - B1 -
1109  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1110  */
1111 0x000106E5,
1112 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1113 0x000106A0,
1114 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1115 0x000106A1,
1116 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1117 0x000106A4,
1118  /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1119  /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1120  /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1121 0x000106A5,
1122 };
1123
1124 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1125 {
1126         u32 eax = cpuid_eax(0x00000001), i;
1127
1128         /* Clear the reserved bits */
1129         eax &= ~(0x3U << 14 | 0xfU << 28);
1130         for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
1131                 if (eax == vmx_preemption_cpu_tfms[i])
1132                         return true;
1133
1134         return false;
1135 }
1136
static inline bool cpu_has_vmx_preemption_timer(void)
{
	return vmcs_config.pin_based_exec_ctrl &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}

static inline bool cpu_has_vmx_posted_intr(void)
{
	return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
		vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
}

/* APICv needs register virt, virtual interrupt delivery and posted intr. */
static inline bool cpu_has_vmx_apicv(void)
{
	return cpu_has_vmx_apic_register_virt() &&
		cpu_has_vmx_virtual_intr_delivery() &&
		cpu_has_vmx_posted_intr();
}

/* FlexPriority = TPR shadow + virtualized APIC accesses. */
static inline bool cpu_has_vmx_flexpriority(void)
{
	return cpu_has_vmx_tpr_shadow() &&
		cpu_has_vmx_virtualize_apic_accesses();
}
1161
/* Tests of the EPT and VPID capability bits captured in vmx_capability. */
static inline bool cpu_has_vmx_ept_execute_only(void)
{
	return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}

static inline bool cpu_has_vmx_ept_2m_page(void)
{
	return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_1g_page(void)
{
	return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
}

static inline bool cpu_has_vmx_ept_4levels(void)
{
	return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
}

static inline bool cpu_has_vmx_ept_mt_wb(void)
{
	return vmx_capability.ept & VMX_EPTP_WB_BIT;
}

static inline bool cpu_has_vmx_ept_5levels(void)
{
	return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
}

static inline bool cpu_has_vmx_ept_ad_bits(void)
{
	return vmx_capability.ept & VMX_EPT_AD_BIT;
}

static inline bool cpu_has_vmx_invept_context(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invept_global(void)
{
	return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
}

static inline bool cpu_has_vmx_invvpid_single(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invvpid_global(void)
{
	return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}

static inline bool cpu_has_vmx_invvpid(void)
{
	return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
}
1221
static inline bool cpu_has_vmx_ept(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_EPT;
}

static inline bool cpu_has_vmx_unrestricted_guest(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_UNRESTRICTED_GUEST;
}

static inline bool cpu_has_vmx_ple(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}

/* VMX_BASIC_INOUT lives in the high 32 bits of the basic capability MSR. */
static inline bool cpu_has_vmx_basic_inout(void)
{
	return	(((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
}

/* APIC-access virtualization is only needed with an in-kernel LAPIC. */
static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
{
	return flexpriority_enabled && lapic_in_kernel(vcpu);
}

static inline bool cpu_has_vmx_vpid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID;
}

static inline bool cpu_has_vmx_rdtscp(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_RDTSCP;
}

static inline bool cpu_has_vmx_invpcid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_INVPCID;
}
1267
static inline bool cpu_has_virtual_nmis(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool cpu_has_vmx_wbinvd_exit(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_WBINVD_EXITING;
}

static inline bool cpu_has_vmx_shadow_vmcs(void)
{
	u64 vmx_msr;
	rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
	/* check if the cpu supports writing r/o exit information fields */
	if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
		return false;

	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_SHADOW_VMCS;
}

static inline bool cpu_has_vmx_pml(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
}

static inline bool cpu_has_vmx_tsc_scaling(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_TSC_SCALING;
}

static inline bool cpu_has_vmx_vmfunc(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VMFUNC;
}

static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}
1312
/* CR3-target count advertised to L1 in the VMX misc MSR. */
static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
{
	return vmx_misc_cr3_count(to_vmx(vcpu)->nested.nested_vmx_misc_low);
}

/* Test a primary processor-based execution control bit in vmcs12. */
static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

/*
 * Test a secondary execution control bit in vmcs12; the secondary
 * controls only take effect when activated in the primary controls.
 */
static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}

static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}
1335
/* Secondary-control features requested by L1 in vmcs12. */
static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}

static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
}

static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
}

static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}

static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}

static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}

static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}

static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}

static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
}

/* EPTP switching requires VMFUNC plus the EPTP-switching function bit. */
static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
{
	return nested_cpu_has_vmfunc(vmcs12) &&
		(vmcs12->vm_function_control &
		 VMX_VMFUNC_EPTP_SWITCHING);
}

/* Valid NMI in the interruption-information field. */
static inline bool is_nmi(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
}
1393
/* Forward declarations for the nested VM-exit paths defined later. */
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
			      u32 exit_intr_info,
			      unsigned long exit_qualification);
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
			struct vmcs12 *vmcs12,
			u32 reason, unsigned long qualification);
1400
1401 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
1402 {
1403         int i;
1404
1405         for (i = 0; i < vmx->nmsrs; ++i)
1406                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
1407                         return i;
1408         return -1;
1409 }
1410
1411 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1412 {
1413     struct {
1414         u64 vpid : 16;
1415         u64 rsvd : 48;
1416         u64 gva;
1417     } operand = { vpid, 0, gva };
1418
1419     asm volatile (__ex(ASM_VMX_INVVPID)
1420                   /* CF==1 or ZF==1 --> rc = -1 */
1421                   "; ja 1f ; ud2 ; 1:"
1422                   : : "a"(&operand), "c"(ext) : "cc", "memory");
1423 }
1424
/*
 * Execute INVEPT with extent type @ext on @eptp/@gpa.  On failure
 * (CF or ZF set) trap with ud2.
 */
static inline void __invept(int ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	asm volatile (__ex(ASM_VMX_INVEPT)
			/* CF==1 or ZF==1 --> rc = -1 */
			"; ja 1f ; ud2 ; 1:\n"
			: : "a" (&operand), "c" (ext) : "cc", "memory");
}
1436
1437 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
1438 {
1439         int i;
1440
1441         i = __find_msr_index(vmx, msr);
1442         if (i >= 0)
1443                 return &vmx->guest_msrs[i];
1444         return NULL;
1445 }
1446
/*
 * VMCLEAR @vmcs: flush its cached state to memory and mark it inactive.
 * setna captures CF/ZF from the instruction; failure is only logged.
 */
static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}
1459
/*
 * VMCLEAR a loaded_vmcs (and its shadow VMCS, if one was launched) and
 * reset its bookkeeping so it is no longer associated with any CPU.
 * Note: __loaded_vmcs_clear() relies on ->cpu being set to -1 here only
 * after the list unlink (see the smp_wmb() there).
 */
static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
{
	vmcs_clear(loaded_vmcs->vmcs);
	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
		vmcs_clear(loaded_vmcs->shadow_vmcs);
	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;
}
1468
/*
 * VMPTRLD @vmcs: make it the current VMCS on this CPU.  setna captures
 * CF/ZF from the instruction; failure is only logged.
 */
static void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
			: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
			: "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
		       vmcs, phys_addr);
}
1481
#ifdef CONFIG_KEXEC_CORE
/*
 * This bitmap is used to indicate whether the vmclear
 * operation is enabled on all cpus. All disabled by
 * default.
 */
static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;

/* Allow the crash path to VMCLEAR VMCSs loaded on @cpu. */
static inline void crash_enable_local_vmclear(int cpu)
{
	cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

/* Forbid the crash path from VMCLEARing VMCSs loaded on @cpu. */
static inline void crash_disable_local_vmclear(int cpu)
{
	cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

static inline int crash_local_vmclear_enabled(int cpu)
{
	return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
}

/*
 * VMCLEAR every VMCS on this CPU's loaded list, if crash vmclear is
 * currently enabled for this CPU.
 */
static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	if (!crash_local_vmclear_enabled(cpu))
		return;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#else
static inline void crash_enable_local_vmclear(int cpu) { }
static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC_CORE */
1521
/*
 * IPI callback: VMCLEAR @arg (a struct loaded_vmcs *) on the CPU it is
 * loaded on and unlink it from that CPU's loaded-VMCS list.
 */
static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	crash_disable_local_vmclear(cpu);
	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);

	/*
	 * we should ensure updating loaded_vmcs->loaded_vmcss_on_cpu_link
	 * is before setting loaded_vmcs->cpu to -1 which is done in
	 * loaded_vmcs_init. Otherwise, other cpu can see cpu = -1 first
	 * then adds the vmcs into percpu list before it is deleted.
	 */
	smp_wmb();

	loaded_vmcs_init(loaded_vmcs);
	crash_enable_local_vmclear(cpu);
}
1545
1546 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1547 {
1548         int cpu = loaded_vmcs->cpu;
1549
1550         if (cpu != -1)
1551                 smp_call_function_single(cpu,
1552                          __loaded_vmcs_clear, loaded_vmcs, 1);
1553 }
1554
/*
 * Flush TLB entries tagged with @vpid.  VPID 0 is the host's tag and is
 * never flushed here.
 */
static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_single())
		__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

/* Flush TLB entries for all VPIDs. */
static inline void vpid_sync_vcpu_global(void)
{
	if (cpu_has_vmx_invvpid_global())
		__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

/* Flush @vpid's entries, falling back to a global flush if needed. */
static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else
		vpid_sync_vcpu_global();
}
1577
/* Invalidate EPT-derived mappings for all EPTPs. */
static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

/* Invalidate mappings for @eptp, falling back to a global flush. */
static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}
1590
/*
 * Compile-time checks that a VMCS accessor's width matches the field
 * encoding.  In the encoding, bits 14:13 give the field width (0 =
 * 16-bit, 0x2000 = 64-bit, 0x4000 = 32-bit, 0x6000 = natural width) and
 * bit 0 selects the high half of a 64-bit field, as the messages below
 * spell out.  Only constant fields can be checked.
 */
static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}
1634
/* VMREAD @field from the current VMCS (no width checking). */
static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	return __vmcs_readl(field);
}

/* On 32-bit hosts a 64-bit field is read as two halves (field, field+1). */
static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	return __vmcs_readl(field);
}
1671
/* Log a failed VMWRITE (field, value, VM-instruction error) with a stack dump. */
static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}
1678
1679 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
1680 {
1681         u8 error;
1682
1683         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
1684                        : "=q"(error) : "a"(value), "d"(field) : "cc");
1685         if (unlikely(error))
1686                 vmwrite_error(field, value);
1687 }
1688
1689 static __always_inline void vmcs_write16(unsigned long field, u16 value)
1690 {
1691         vmcs_check16(field);
1692         __vmcs_writel(field, value);
1693 }
1694
1695 static __always_inline void vmcs_write32(unsigned long field, u32 value)
1696 {
1697         vmcs_check32(field);
1698         __vmcs_writel(field, value);
1699 }
1700
/*
 * Write a 64-bit VMCS field.  32-bit kernels write the low half first
 * and then the high half via the @field + 1 encoding; the empty asm is
 * a compiler barrier between the two VMWRITEs.
 */
static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	asm volatile ("");
	__vmcs_writel(field+1, value >> 32);
#endif
}
1710
1711 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
1712 {
1713         vmcs_checkl(field);
1714         __vmcs_writel(field, value);
1715 }
1716
1717 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
1718 {
1719         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1720                          "vmcs_clear_bits does not support 64-bit fields");
1721         __vmcs_writel(field, __vmcs_readl(field) & ~mask);
1722 }
1723
1724 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
1725 {
1726         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1727                          "vmcs_set_bits does not support 64-bit fields");
1728         __vmcs_writel(field, __vmcs_readl(field) | mask);
1729 }
1730
1731 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
1732 {
1733         vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
1734 }
1735
1736 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
1737 {
1738         vmcs_write32(VM_ENTRY_CONTROLS, val);
1739         vmx->vm_entry_controls_shadow = val;
1740 }
1741
1742 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
1743 {
1744         if (vmx->vm_entry_controls_shadow != val)
1745                 vm_entry_controls_init(vmx, val);
1746 }
1747
1748 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
1749 {
1750         return vmx->vm_entry_controls_shadow;
1751 }
1752
1753
1754 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1755 {
1756         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
1757 }
1758
1759 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1760 {
1761         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
1762 }
1763
1764 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
1765 {
1766         vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
1767 }
1768
1769 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
1770 {
1771         vmcs_write32(VM_EXIT_CONTROLS, val);
1772         vmx->vm_exit_controls_shadow = val;
1773 }
1774
1775 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
1776 {
1777         if (vmx->vm_exit_controls_shadow != val)
1778                 vm_exit_controls_init(vmx, val);
1779 }
1780
1781 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
1782 {
1783         return vmx->vm_exit_controls_shadow;
1784 }
1785
1786
1787 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1788 {
1789         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
1790 }
1791
1792 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1793 {
1794         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
1795 }
1796
1797 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
1798 {
1799         vmx->segment_cache.bitmask = 0;
1800 }
1801
1802 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
1803                                        unsigned field)
1804 {
1805         bool ret;
1806         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
1807
1808         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
1809                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
1810                 vmx->segment_cache.bitmask = 0;
1811         }
1812         ret = vmx->segment_cache.bitmask & mask;
1813         vmx->segment_cache.bitmask |= mask;
1814         return ret;
1815 }
1816
1817 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
1818 {
1819         u16 *p = &vmx->segment_cache.seg[seg].selector;
1820
1821         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
1822                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
1823         return *p;
1824 }
1825
1826 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
1827 {
1828         ulong *p = &vmx->segment_cache.seg[seg].base;
1829
1830         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
1831                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
1832         return *p;
1833 }
1834
1835 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
1836 {
1837         u32 *p = &vmx->segment_cache.seg[seg].limit;
1838
1839         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
1840                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
1841         return *p;
1842 }
1843
1844 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
1845 {
1846         u32 *p = &vmx->segment_cache.seg[seg].ar;
1847
1848         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
1849                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
1850         return *p;
1851 }
1852
/*
 * Recompute which exception vectors cause a VM exit and program the
 * VMCS exception bitmap.  Order matters below: the vm86 override sets
 * all bits, and the EPT case then clears #PF again.
 */
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	/* Vectors KVM always intercepts. */
	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
	     (1u << DB_VECTOR) | (1u << AC_VECTOR);
	/* Intercept #BP only while userspace uses software breakpoints. */
	if ((vcpu->guest_debug &
	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
		eb |= 1u << BP_VECTOR;
	/* Emulated real mode: intercept every vector. */
	if (to_vmx(vcpu)->rmode.vm86_active)
		eb = ~0;
	if (enable_ept)
		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */

	/* When we are running a nested L2 guest and L1 specified for it a
	 * certain exception bitmap, we must trap the same exceptions and pass
	 * them to L1. When running L2, we will only handle the exceptions
	 * specified above if L1 did not want them.
	 */
	if (is_guest_mode(vcpu))
		eb |= get_vmcs12(vcpu)->exception_bitmap;

	vmcs_write32(EXCEPTION_BITMAP, eb);
}
1878
static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit)
{
	/* Disable the paired VM-entry/VM-exit "load MSR" controls. */
	vm_entry_controls_clearbit(vmx, entry);
	vm_exit_controls_clearbit(vmx, exit);
}
1885
/*
 * Stop atomically switching @msr across VM entry/exit.  EFER and
 * PERF_GLOBAL_CTRL prefer their dedicated VM-entry/VM-exit controls
 * when the CPU supports them; otherwise the MSR is removed from the
 * autoload array.
 */
static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
			return;
		}
		break;
	}

	/* Find @msr in the autoload array. */
	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == m->nr)
		return;	/* not present, nothing to do */
	/* Swap-remove: move the last entry into the vacated slot. */
	--m->nr;
	m->guest[i] = m->guest[m->nr];
	m->host[i] = m->host[m->nr];
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
}
1922
/*
 * Switch an MSR through dedicated VMCS guest/host value fields plus
 * the paired VM-entry/VM-exit "load" controls.  The values are written
 * before the control bits are enabled.
 */
static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit,
		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
		u64 guest_val, u64 host_val)
{
	vmcs_write64(guest_val_vmcs, guest_val);
	vmcs_write64(host_val_vmcs, host_val);
	vm_entry_controls_setbit(vmx, entry);
	vm_exit_controls_setbit(vmx, exit);
}
1933
/*
 * Arrange for @msr to hold @guest_val while the guest runs and
 * @host_val after VM exit.  EFER and PERF_GLOBAL_CTRL use their
 * dedicated VM-entry/VM-exit controls when available; everything else
 * goes through the autoload array, updating an existing entry in place
 * if the MSR is already there.
 */
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val)
{
	unsigned i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER,
					GUEST_IA32_EFER,
					HOST_IA32_EFER,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
					GUEST_IA32_PERF_GLOBAL_CTRL,
					HOST_IA32_PERF_GLOBAL_CTRL,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_IA32_PEBS_ENABLE:
		/* PEBS needs a quiescent period after being disabled (to write
		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
		 * provide that period, so a CPU could write host's record into
		 * guest's memory.
		 */
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
	}

	/* Reuse an existing slot for @msr if one is present. */
	for (i = 0; i < m->nr; ++i)
		if (m->guest[i].index == msr)
			break;

	if (i == NR_AUTOLOAD_MSRS) {
		/* Array is full and @msr is not in it: drop the request. */
		printk_once(KERN_WARNING "Not enough msr switch entries. "
				"Can't add msr %x\n", msr);
		return;
	} else if (i == m->nr) {
		/* New entry: grow the array and update the VMCS counts. */
		++m->nr;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
	}

	m->guest[i].index = msr;
	m->guest[i].value = guest_val;
	m->host[i].index = msr;
	m->host[i].value = host_val;
}
1991
/*
 * Decide how guest EFER is switched across VM entry/exit and compute
 * the value to use.  Returns true when EFER is handled through the
 * shared-MSR machinery (guest_msrs[@efer_offset]); returns false when
 * it is switched atomically by hardware or the autoload array and
 * needs no shared-MSR handling.
 */
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
	u64 guest_efer = vmx->vcpu.arch.efer;
	u64 ignore_bits = 0;

	if (!enable_ept) {
		/*
		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
		 * host CPUID is more efficient than testing guest CPUID
		 * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
		 */
		if (boot_cpu_has(X86_FEATURE_SMEP))
			guest_efer |= EFER_NX;
		else if (!(guest_efer & EFER_NX))
			ignore_bits |= EFER_NX;
	}

	/*
	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
	 */
	ignore_bits |= EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif

	/* Drop any previous atomic-switch entry before deciding anew. */
	clear_atomic_switch_msr(vmx, MSR_EFER);

	/*
	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
	 * On CPUs that support "load IA32_EFER", always switch EFER
	 * atomically, since it's faster than switching it manually.
	 */
	if (cpu_has_load_ia32_efer ||
	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
		if (!(guest_efer & EFER_LMA))
			guest_efer &= ~EFER_LME;
		if (guest_efer != host_efer)
			add_atomic_switch_msr(vmx, MSR_EFER,
					      guest_efer, host_efer);
		return false;
	} else {
		/* Bits in ignore_bits are taken from the host value. */
		guest_efer &= ~ignore_bits;
		guest_efer |= host_efer & ignore_bits;

		vmx->guest_msrs[efer_offset].data = guest_efer;
		vmx->guest_msrs[efer_offset].mask = ~ignore_bits;

		return true;
	}
}
2045
2046 #ifdef CONFIG_X86_32
2047 /*
2048  * On 32-bit kernels, VM exits still load the FS and GS bases from the
2049  * VMCS rather than the segment table.  KVM uses this helper to figure
2050  * out the current bases to poke them into the VMCS before entry.
2051  */
/*
 * Return the base address of the segment described by @selector, read
 * from the current GDT, or from the LDT for TI=1 selectors (the LDT
 * base itself is resolved with a recursive lookup).  Null selectors
 * yield 0.
 */
static unsigned long segment_base(u16 selector)
{
	struct desc_struct *table;
	unsigned long v;

	/* Null selector (only RPL bits set): no base. */
	if (!(selector & ~SEGMENT_RPL_MASK))
		return 0;

	table = get_current_gdt_ro();

	if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		u16 ldt_selector = kvm_read_ldt();

		if (!(ldt_selector & ~SEGMENT_RPL_MASK))
			return 0;

		/* The LDT descriptor's base points at the LDT itself. */
		table = (struct desc_struct *)segment_base(ldt_selector);
	}
	/* Descriptor index is selector >> 3 (strip RPL and TI bits). */
	v = get_desc_base(&table[selector >> 3]);
	return v;
}
2073 #endif
2074
/*
 * Save host segment/LDT state that VM entry clobbers, program the VMCS
 * host-state selector/base fields, and load the guest values of the
 * shared MSRs.  Runs at most once per load/put cycle, guarded by
 * host_state.loaded.
 */
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int i;

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = kvm_read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	savesegment(fs, vmx->host_state.fs_sel);
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		/* TI/RPL bits set: VMCS gets 0, fs reloaded manually on exit. */
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	savesegment(gs, vmx->host_state.gs_sel);
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	savesegment(ds, vmx->host_state.ds_sel);
	savesegment(es, vmx->host_state.es_sel);
#endif

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	/* Save host KERNEL_GS_BASE, then switch to the guest's value. */
	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
	if (is_long_mode(&vmx->vcpu))
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (boot_cpu_has(X86_FEATURE_MPX))
		rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
	/* Load the guest values of the shared MSRs. */
	for (i = 0; i < vmx->save_nmsrs; ++i)
		kvm_set_shared_msr(vmx->guest_msrs[i].index,
				   vmx->guest_msrs[i].data,
				   vmx->guest_msrs[i].mask);
}
2131
/*
 * Undo vmx_save_host_state(): restore the host LDT, segment registers,
 * KERNEL_GS_BASE and BNDCFGS, and refresh the TSS limit and GDT
 * mapping.  vmx_load_host_state() wraps this with preemption disabled.
 */
static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
#ifdef CONFIG_X86_64
	/* Preserve the guest's KERNEL_GS_BASE before restoring the host's. */
	if (is_long_mode(&vmx->vcpu))
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (vmx->host_state.gs_ldt_reload_needed) {
		kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
		load_gs_index(vmx->host_state.gs_sel);
#else
		loadsegment(gs, vmx->host_state.gs_sel);
#endif
	}
	if (vmx->host_state.fs_reload_needed)
		loadsegment(fs, vmx->host_state.fs_sel);
#ifdef CONFIG_X86_64
	/* Bitwise OR is intentional: reload if either selector is nonzero. */
	if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
		loadsegment(ds, vmx->host_state.ds_sel);
		loadsegment(es, vmx->host_state.es_sel);
	}
#endif
	invalidate_tss_limit();
#ifdef CONFIG_X86_64
	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
	if (vmx->host_state.msr_host_bndcfgs)
		wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
	load_fixmap_gdt(raw_smp_processor_id());
}
2167
/*
 * Restore host state with preemption disabled, so the task cannot
 * migrate CPUs while per-cpu segment/MSR state is being restored.
 */
static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	preempt_disable();
	__vmx_load_host_state(vmx);
	preempt_enable();
}
2174
/*
 * Update the posted-interrupt descriptor when the vCPU is loaded on
 * @cpu: retarget PI.NDST at the new CPU and clear the suppress (SN)
 * bit so notifications are delivered again.
 */
static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
	struct pi_desc old, new;
	unsigned int dest;

	/*
	 * In case of hot-plug or hot-unplug, we may have to undo
	 * vmx_vcpu_pi_put even if there is no assigned device.  And we
	 * always keep PI.NDST up to date for simplicity: it makes the
	 * code easier, and CPU migration is not a fast path.
	 */
	if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
		return;

	/*
	 * First handle the simple case where no cmpxchg is necessary; just
	 * allow posting non-urgent interrupts.
	 *
	 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
	 * PI.NDST: pi_post_block will do it for us and the wakeup_handler
	 * expects the VCPU to be on the blocked_vcpu_list that matches
	 * PI.NDST.
	 */
	if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
	    vcpu->cpu == cpu) {
		pi_clear_sn(pi_desc);
		return;
	}

	/* The full case.  */
	do {
		old.control = new.control = pi_desc->control;

		dest = cpu_physical_id(cpu);

		if (x2apic_enabled())
			new.ndst = dest;
		else
			/* xAPIC: physical APIC ID lives in bits 15:8 of NDST. */
			new.ndst = (dest << 8) & 0xFF00;

		new.sn = 0;
	} while (cmpxchg64(&pi_desc->control, old.control,
			   new.control) != old.control);
}
2220
2221 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
2222 {
2223         vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
2224         vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
2225 }
2226
2227 /*
2228  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
2229  * vcpu mutex is already taken.
2230  */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;

	if (!already_loaded) {
		/* Clear the VMCS on its old CPU before claiming it here. */
		loaded_vmcs_clear(vmx->loaded_vmcs);
		local_irq_disable();
		crash_disable_local_vmclear(cpu);

		/*
		 * Read loaded_vmcs->cpu should be before fetching
		 * loaded_vmcs->loaded_vmcss_on_cpu_link.
		 * See the comments in __loaded_vmcs_clear().
		 */
		smp_rmb();

		list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
			 &per_cpu(loaded_vmcss_on_cpu, cpu));
		crash_enable_local_vmclear(cpu);
		local_irq_enable();
	}

	/* Make this VMCS the current one on @cpu via VMPTRLD if needed. */
	if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
		per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
		vmcs_load(vmx->loaded_vmcs->vmcs);
	}

	if (!already_loaded) {
		void *gdt = get_current_gdt_ro();
		unsigned long sysenter_esp;

		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.  See 22.2.4.
		 */
		vmcs_writel(HOST_TR_BASE,
			    (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
		vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */

		/*
		 * VM exits change the host TR limit to 0x67 after a VM
		 * exit.  This is okay, since 0x67 covers everything except
		 * the IO bitmap and we have code to handle the IO bitmap
		 * being lost after a VM exit.
		 */
		BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		vmx->loaded_vmcs->cpu = cpu;
	}

	/* Setup TSC multiplier */
	if (kvm_has_tsc_control &&
	    vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
		decache_tsc_multiplier(vmx);

	vmx_vcpu_pi_load(vcpu, cpu);
	/* Cache the current host PKRU and DEBUGCTL values. */
	vmx->host_pkru = read_pkru();
	vmx->host_debugctlmsr = get_debugctlmsr();
}
2296
2297 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
2298 {
2299         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2300
2301         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
2302                 !irq_remapping_cap(IRQ_POSTING_CAP)  ||
2303                 !kvm_vcpu_apicv_active(vcpu))
2304                 return;
2305
2306         /* Set SN when the vCPU is preempted */
2307         if (vcpu->preempted)
2308                 pi_set_sn(pi_desc);
2309 }
2310
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/* Tear down posted-interrupt state, then restore host state. */
	vmx_vcpu_pi_put(vcpu);
	__vmx_load_host_state(vmx);
}
2317
2318 static bool emulation_required(struct kvm_vcpu *vcpu)
2319 {
2320         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
2321 }
2322
2323 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
2324
2325 /*
2326  * Return the cr0 value that a nested guest would read. This is a combination
2327  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
2328  * its hypervisor (cr0_read_shadow).
2329  */
2330 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
2331 {
2332         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
2333                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
2334 }
2335 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
2336 {
2337         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
2338                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
2339 }
2340
/*
 * Return the guest's RFLAGS, reading the VMCS only on the first access
 * after the cached registers were flushed.  In emulated real mode the
 * IOPL/VM bits in the VMCS are owned by KVM, so the guest-visible
 * copies are taken from rmode.save_rflags instead.
 */
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags, save_rflags;

	if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
		__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
		rflags = vmcs_readl(GUEST_RFLAGS);
		if (to_vmx(vcpu)->rmode.vm86_active) {
			rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
			save_rflags = to_vmx(vcpu)->rmode.save_rflags;
			rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
		}
		to_vmx(vcpu)->rflags = rflags;
	}
	return to_vmx(vcpu)->rflags;
}
2357
/*
 * Set the guest's RFLAGS, keeping the cached copy and the VMCS in
 * sync.  In emulated real mode the VMCS value has IOPL and VM forced
 * on, while the guest-visible value is kept in rmode.save_rflags.
 */
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	unsigned long old_rflags = vmx_get_rflags(vcpu);

	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
	to_vmx(vcpu)->rflags = rflags;
	if (to_vmx(vcpu)->rmode.vm86_active) {
		to_vmx(vcpu)->rmode.save_rflags = rflags;
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	}
	vmcs_writel(GUEST_RFLAGS, rflags);

	/* Toggling EFLAGS.VM may change whether emulation is required. */
	if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
		to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
}
2373
2374 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
2375 {
2376         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2377         int ret = 0;
2378
2379         if (interruptibility & GUEST_INTR_STATE_STI)
2380                 ret |= KVM_X86_SHADOW_INT_STI;
2381         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
2382                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
2383
2384         return ret;
2385 }
2386
2387 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
2388 {
2389         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2390         u32 interruptibility = interruptibility_old;
2391
2392         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
2393
2394         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
2395                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
2396         else if (mask & KVM_X86_SHADOW_INT_STI)
2397                 interruptibility |= GUEST_INTR_STATE_STI;
2398
2399         if ((interruptibility != interruptibility_old))
2400                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
2401 }
2402
2403 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
2404 {
2405         unsigned long rip;
2406
2407         rip = kvm_rip_read(vcpu);
2408         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
2409         kvm_rip_write(vcpu, rip);
2410
2411         /* skipping an emulated instruction also counts */
2412         vmx_set_interrupt_shadow(vcpu, 0);
2413 }
2414
/*
 * Deliver the exception pending in vcpu->arch.exception to L1 as an
 * EXIT_REASON_EXCEPTION_NMI vmexit, encoding vector, type, error code
 * and NMI unblocking into the VM-exit interruption-information field.
 */
static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
					       unsigned long exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (vcpu->arch.exception.has_error_code) {
		vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (kvm_exception_is_soft(nr))
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	/* Report NMI unblocking unless an event was being vectored. */
	if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
	    vmx_get_nmi_mask(vcpu))
		intr_info |= INTR_INFO_UNBLOCK_NMI;

	nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
}
2438
/*
 * KVM wants to inject page-faults which it received, into the guest. This
 * function checks whether, in a nested guest, an exception needs to be
 * delivered to L1 or L2.  Returns 1 (with *exit_qual set) when it should
 * cause a vmexit to L1, 0 when it can be handled for / injected into L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;

	if (nr == PF_VECTOR) {
		/* Nested async page faults always go to L1. */
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		/*
		 * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
		 * The fix is to add the ancillary datum (CR2 or DR6) to structs
		 * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
		 * can be written only when inject_pending_event runs.  This should be
		 * conditional on a new capability---if the capability is disabled,
		 * kvm_multiple_exception would write the ancillary information to
		 * CR2 or DR6, for backwards ABI-compatibility.
		 */
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			/* For #PF the exit qualification is the fault address. */
			*exit_qual = vcpu->arch.cr2;
			return 1;
		}
	} else {
		if (vmcs12->exception_bitmap & (1u << nr)) {
			/* For #DB the exit qualification carries DR6. */
			if (nr == DB_VECTOR)
				*exit_qual = vcpu->arch.dr6;
			else
				*exit_qual = 0;
			return 1;
		}
	}

	return 0;
}
2479
/*
 * Inject the exception pending in vcpu->arch.exception into the guest
 * through the VM-entry interruption-information field, or through the
 * real-mode injection helper when emulating vm86 mode.
 */
static void vmx_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	u32 error_code = vcpu->arch.exception.error_code;
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (has_error_code) {
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (vmx->rmode.vm86_active) {
		int inc_eip = 0;
		/* For soft exceptions, pass the instruction length along. */
		if (kvm_exception_is_soft(nr))
			inc_eip = vcpu->arch.event_exit_inst_len;
		if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}

	if (kvm_exception_is_soft(nr)) {
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	} else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
}
2511
/* Report whether hardware supports the RDTSCP-enabling VMX control. */
static bool vmx_rdtscp_supported(void)
{
	return cpu_has_vmx_rdtscp();
}
2516
/*
 * INVPCID is reported as supported only when hardware has the VMX control
 * for it AND EPT is enabled.
 */
static bool vmx_invpcid_supported(void)
{
	return cpu_has_vmx_invpcid() && enable_ept;
}
2521
2522 /*
2523  * Swap MSR entry in host/guest MSR entry array.
2524  */
2525 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
2526 {
2527         struct shared_msr_entry tmp;
2528
2529         tmp = vmx->guest_msrs[to];
2530         vmx->guest_msrs[to] = vmx->guest_msrs[from];
2531         vmx->guest_msrs[from] = tmp;
2532 }
2533
/*
 * Point the VMCS MSR_BITMAP field at the permission bitmap that matches the
 * vcpu's current configuration: nested (L2) mode, x2APIC virtualization
 * with/without APICv, and long vs. legacy mode.
 */
static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
{
	unsigned long *msr_bitmap;

	if (is_guest_mode(vcpu))
		/* L2 uses the bitmap prepared for the nested guest. */
		msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
	else if (cpu_has_secondary_exec_ctrls() &&
		 (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
		  SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
		if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) {
			if (is_long_mode(vcpu))
				msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv;
			else
				msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv;
		} else {
			if (is_long_mode(vcpu))
				msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
			else
				msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
		}
	} else {
		if (is_long_mode(vcpu))
			msr_bitmap = vmx_msr_bitmap_longmode;
		else
			msr_bitmap = vmx_msr_bitmap_legacy;
	}

	vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
}
2563
/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs, index;

	/*
	 * Compact the MSRs that currently need switching into the first
	 * save_nmsrs slots of guest_msrs[].
	 */
	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		/* TSC_AUX is switched only if the guest can execute RDTSCP. */
		index = __find_msr_index(vmx, MSR_TSC_AUX);
		if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_STAR);
		if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	}
#endif
	index = __find_msr_index(vmx, MSR_EFER);
	if (index >= 0 && update_transition_efer(vmx, index))
		move_msr_up(vmx, index, save_nmsrs++);

	vmx->save_nmsrs = save_nmsrs;

	/* The set of switched MSRs changed; refresh the MSR bitmap. */
	if (cpu_has_vmx_msr_bitmap())
		vmx_set_msr_bitmap(&vmx->vcpu);
}
2606
2607 /*
2608  * reads and returns guest's timestamp counter "register"
2609  * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
2610  * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
2611  */
2612 static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
2613 {
2614         u64 host_tsc, tsc_offset;
2615
2616         host_tsc = rdtsc();
2617         tsc_offset = vmcs_read64(TSC_OFFSET);
2618         return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
2619 }
2620
/*
 * writes 'offset' into guest's timestamp counter offset register
 */
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	if (is_guest_mode(vcpu)) {
		/*
		 * We're here if L1 chose not to trap WRMSR to TSC. According
		 * to the spec, this should set L1's TSC; The offset that L1
		 * set for L2 remains unchanged, and still needs to be added
		 * to the newly set TSC to get L2's TSC.
		 */
		struct vmcs12 *vmcs12;
		/* recalculate vmcs02.TSC_OFFSET: */
		vmcs12 = get_vmcs12(vcpu);
		vmcs_write64(TSC_OFFSET, offset +
			(nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
			 vmcs12->tsc_offset : 0));
	} else {
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   vmcs_read64(TSC_OFFSET), offset);
		vmcs_write64(TSC_OFFSET, offset);
	}
}
2645
2646 /*
2647  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
2648  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
2649  * all guests if the "nested" module option is off, and can also be disabled
2650  * for a single guest by disabling its VMX cpuid bit.
2651  */
2652 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2653 {
2654         return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
2655 }
2656
/*
 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
 * returned for the various VMX controls MSRs when nested VMX is enabled.
 * The same values should also be used to verify that vmcs12 control fields are
 * valid during nested entry from L1 to L2.
 * Each of these control msrs has a low and high 32-bit half: A low bit is on
 * if the corresponding bit in the (32-bit) control field *must* be on, and a
 * bit in the high half is on if the corresponding bit in the control field
 * may be on. See also vmx_control_verify().
 */
static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
{
	/*
	 * Note that as a general rule, the high half of the MSRs (bits in
	 * the control fields which may be 1) should be initialized by the
	 * intersection of the underlying hardware's MSR (i.e., features which
	 * can be supported) and the list of features we want to expose -
	 * because they are known to be properly supported in our code.
	 * Also, usually, the low half of the MSRs (bits which must be 1) can
	 * be set to 0, meaning that L1 may turn off any of these bits. The
	 * reason is that if one of these bits is necessary, it will appear
	 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
	 * fields of vmcs01 and vmcs02, will turn these bits off - and
	 * nested_vmx_exit_reflected() will not pass related exits to L1.
	 * These rules have exceptions below.
	 */

	/* pin-based controls */
	rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
		vmx->nested.nested_vmx_pinbased_ctls_low,
		vmx->nested.nested_vmx_pinbased_ctls_high);
	vmx->nested.nested_vmx_pinbased_ctls_low |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	vmx->nested.nested_vmx_pinbased_ctls_high &=
		PIN_BASED_EXT_INTR_MASK |
		PIN_BASED_NMI_EXITING |
		PIN_BASED_VIRTUAL_NMIS;
	vmx->nested.nested_vmx_pinbased_ctls_high |=
		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		PIN_BASED_VMX_PREEMPTION_TIMER;
	/* Posted interrupts are only advertised when APICv is active. */
	if (kvm_vcpu_apicv_active(&vmx->vcpu))
		vmx->nested.nested_vmx_pinbased_ctls_high |=
			PIN_BASED_POSTED_INTR;

	/* exit controls */
	rdmsr(MSR_IA32_VMX_EXIT_CTLS,
		vmx->nested.nested_vmx_exit_ctls_low,
		vmx->nested.nested_vmx_exit_ctls_high);
	vmx->nested.nested_vmx_exit_ctls_low =
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;

	vmx->nested.nested_vmx_exit_ctls_high &=
#ifdef CONFIG_X86_64
		VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
		VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
	vmx->nested.nested_vmx_exit_ctls_high |=
		VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;

	if (kvm_mpx_supported())
		vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;

	/* We support free control of debug control saving. */
	vmx->nested.nested_vmx_exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;

	/* entry controls */
	rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
		vmx->nested.nested_vmx_entry_ctls_low,
		vmx->nested.nested_vmx_entry_ctls_high);
	vmx->nested.nested_vmx_entry_ctls_low =
		VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
	vmx->nested.nested_vmx_entry_ctls_high &=
#ifdef CONFIG_X86_64
		VM_ENTRY_IA32E_MODE |
#endif
		VM_ENTRY_LOAD_IA32_PAT;
	vmx->nested.nested_vmx_entry_ctls_high |=
		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
	if (kvm_mpx_supported())
		vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;

	/* We support free control of debug control loading. */
	vmx->nested.nested_vmx_entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;

	/* cpu-based controls */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
		vmx->nested.nested_vmx_procbased_ctls_low,
		vmx->nested.nested_vmx_procbased_ctls_high);
	vmx->nested.nested_vmx_procbased_ctls_low =
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
	vmx->nested.nested_vmx_procbased_ctls_high &=
		CPU_BASED_VIRTUAL_INTR_PENDING |
		CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
		CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
		CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
		CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
		CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
		CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
		CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
		CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
		CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
		CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	/*
	 * We can allow some features even when not supported by the
	 * hardware. For example, L1 can specify an MSR bitmap - and we
	 * can use it to avoid exits to L1 - even when L0 runs L2
	 * without MSR bitmaps.
	 */
	vmx->nested.nested_vmx_procbased_ctls_high |=
		CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
		CPU_BASED_USE_MSR_BITMAPS;

	/* We support free control of CR3 access interception. */
	vmx->nested.nested_vmx_procbased_ctls_low &=
		~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);

	/*
	 * secondary cpu-based controls.  Do not include those that
	 * depend on CPUID bits, they are added later by vmx_cpuid_update.
	 */
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		vmx->nested.nested_vmx_secondary_ctls_low,
		vmx->nested.nested_vmx_secondary_ctls_high);
	vmx->nested.nested_vmx_secondary_ctls_low = 0;
	vmx->nested.nested_vmx_secondary_ctls_high &=
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
		SECONDARY_EXEC_DESC |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_APIC_REGISTER_VIRT |
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
		SECONDARY_EXEC_WBINVD_EXITING;

	if (enable_ept) {
		/* nested EPT: emulate EPT also to L1 */
		vmx->nested.nested_vmx_secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_EPT;
		vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
			 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
		if (cpu_has_vmx_ept_execute_only())
			vmx->nested.nested_vmx_ept_caps |=
				VMX_EPT_EXECUTE_ONLY_BIT;
		vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
		vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
			VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
			VMX_EPT_1GB_PAGE_BIT;
		if (enable_ept_ad_bits) {
			vmx->nested.nested_vmx_secondary_ctls_high |=
				SECONDARY_EXEC_ENABLE_PML;
			vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
		}
	}

	if (cpu_has_vmx_vmfunc()) {
		vmx->nested.nested_vmx_secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VMFUNC;
		/*
		 * Advertise EPTP switching unconditionally
		 * since we emulate it
		 */
		if (enable_ept)
			vmx->nested.nested_vmx_vmfunc_controls =
				VMX_VMFUNC_EPTP_SWITCHING;
	}

	/*
	 * Old versions of KVM use the single-context version without
	 * checking for support, so declare that it is supported even
	 * though it is treated as global context.  The alternative is
	 * not failing the single-context invvpid, and it is worse.
	 */
	if (enable_vpid) {
		vmx->nested.nested_vmx_secondary_ctls_high |=
			SECONDARY_EXEC_ENABLE_VPID;
		vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
			VMX_VPID_EXTENT_SUPPORTED_MASK;
	}

	if (enable_unrestricted_guest)
		vmx->nested.nested_vmx_secondary_ctls_high |=
			SECONDARY_EXEC_UNRESTRICTED_GUEST;

	/* miscellaneous data */
	rdmsr(MSR_IA32_VMX_MISC,
		vmx->nested.nested_vmx_misc_low,
		vmx->nested.nested_vmx_misc_high);
	vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
	vmx->nested.nested_vmx_misc_low |=
		VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
		VMX_MISC_ACTIVITY_HLT;
	vmx->nested.nested_vmx_misc_high = 0;

	/*
	 * This MSR reports some information about VMX support. We
	 * should return information about the VMX we emulate for the
	 * guest, and the VMCS structure we give it - not about the
	 * VMX support of the underlying hardware.
	 */
	vmx->nested.nested_vmx_basic =
		VMCS12_REVISION |
		VMX_BASIC_TRUE_CTLS |
		((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
		(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);

	if (cpu_has_vmx_basic_inout())
		vmx->nested.nested_vmx_basic |= VMX_BASIC_INOUT;

	/*
	 * These MSRs specify bits which the guest must keep fixed on
	 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
	 * We picked the standard core2 setting.
	 */
#define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
	vmx->nested.nested_vmx_cr0_fixed0 = VMXON_CR0_ALWAYSON;
	vmx->nested.nested_vmx_cr4_fixed0 = VMXON_CR4_ALWAYSON;

	/* These MSRs specify bits which the guest must keep fixed off. */
	rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx->nested.nested_vmx_cr0_fixed1);
	rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1);

	/* highest index: VMX_PREEMPTION_TIMER_VALUE */
	vmx->nested.nested_vmx_vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
}
2884
2885 /*
2886  * if fixed0[i] == 1: val[i] must be 1
2887  * if fixed1[i] == 0: val[i] must be 0
2888  */
2889 static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
2890 {
2891         return ((val & fixed1) | fixed0) == val;
2892 }
2893
/*
 * Validate a 32-bit VMX control field against its capability MSR halves:
 * @low holds the must-be-1 bits, @high the may-be-1 bits.
 */
static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}
2898
2899 static inline u64 vmx_control_msr(u32 low, u32 high)
2900 {
2901         return low | ((u64)high << 32);
2902 }
2903
2904 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
2905 {
2906         superset &= mask;
2907         subset &= mask;
2908
2909         return (superset | subset) == superset;
2910 }
2911
/*
 * Restore MSR_IA32_VMX_BASIC from userspace.  The new value may only clear
 * feature/reserved bits relative to what KVM reports, must keep the VMCS
 * revision ID unchanged, and must not advertise a smaller VMCS size.
 * Returns 0 on success, -EINVAL on an invalid value.
 */
static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.nested_vmx_basic;

	/* Feature/reserved bits may be cleared by userspace, never set. */
	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.nested_vmx_basic = data;
	return 0;
}
2941
/*
 * Restore one of the "true" VMX control capability MSRs (or the secondary
 * processor-based controls) from userspace.  Userspace may only tighten the
 * advertised capabilities: must-be-1 bits (low half) may be added but not
 * removed, and may-be-1 bits (high half) may be removed but not added.
 * Returns 0 on success, -EINVAL on an invalid value.
 */
static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		lowp = &vmx->nested.nested_vmx_pinbased_ctls_low;
		highp = &vmx->nested.nested_vmx_pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		lowp = &vmx->nested.nested_vmx_procbased_ctls_low;
		highp = &vmx->nested.nested_vmx_procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		lowp = &vmx->nested.nested_vmx_exit_ctls_low;
		highp = &vmx->nested.nested_vmx_exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		lowp = &vmx->nested.nested_vmx_entry_ctls_low;
		highp = &vmx->nested.nested_vmx_entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		lowp = &vmx->nested.nested_vmx_secondary_ctls_low;
		highp = &vmx->nested.nested_vmx_secondary_ctls_high;
		break;
	default:
		/* The caller routes only the indices above. */
		BUG();
	}

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}
2987
/*
 * Restore MSR_IA32_VMX_MISC from userspace.  Feature bits may only be
 * cleared; the preemption-timer rate and MSEG revision ID must be unchanged,
 * and the CR3-target count and MSR-list limit may only be reduced.
 * Returns 0 on success, -EINVAL on an invalid value.
 */
static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved_bits =
		/* feature */
		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
		/* reserved */
		GENMASK_ULL(13, 9) | BIT_ULL(31);
	u64 vmx_misc;

	vmx_misc = vmx_control_msr(vmx->nested.nested_vmx_misc_low,
				   vmx->nested.nested_vmx_misc_high);

	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
		return -EINVAL;

	/* The preemption timer rate is fixed when the timer is offered. */
	if ((vmx->nested.nested_vmx_pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.nested_vmx_misc_low = data;
	vmx->nested.nested_vmx_misc_high = data >> 32;
	return 0;
}
3023
3024 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
3025 {
3026         u64 vmx_ept_vpid_cap;
3027
3028         vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.nested_vmx_ept_caps,
3029                                            vmx->nested.nested_vmx_vpid_caps);
3030
3031         /* Every bit is either reserved or a feature bit. */
3032         if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
3033                 return -EINVAL;
3034
3035         vmx->nested.nested_vmx_ept_caps = data;
3036         vmx->nested.nested_vmx_vpid_caps = data >> 32;
3037         return 0;
3038 }
3039
3040 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3041 {
3042         u64 *msr;
3043
3044         switch (msr_index) {
3045         case MSR_IA32_VMX_CR0_FIXED0:
3046                 msr = &vmx->nested.nested_vmx_cr0_fixed0;
3047                 break;
3048         case MSR_IA32_VMX_CR4_FIXED0:
3049                 msr = &vmx->nested.nested_vmx_cr4_fixed0;
3050                 break;
3051         default:
3052                 BUG();
3053         }
3054
3055         /*
3056          * 1 bits (which indicates bits which "must-be-1" during VMX operation)
3057          * must be 1 in the restored value.
3058          */
3059         if (!is_bitwise_subset(data, *msr, -1ULL))
3060                 return -EINVAL;
3061
3062         *msr = data;
3063         return 0;
3064 }
3065
/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
		 * DEFAULT SETTINGS".
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		/* Accepted as-is; no constraints on the enumerated fields. */
		vmx->nested.nested_vmx_vmcs_enum = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}
3122
/*
 * Read a VMX capability MSR for the (nested) guest into *pdata.
 * Returns 0 on success, non-0 for an unhandled index.
 */
static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = vmx->nested.nested_vmx_basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_pinbased_ctls_low,
			vmx->nested.nested_vmx_pinbased_ctls_high);
		/* The non-"true" MSR also reports the default1 class bits. */
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_procbased_ctls_low,
			vmx->nested.nested_vmx_procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_exit_ctls_low,
			vmx->nested.nested_vmx_exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_entry_ctls_low,
			vmx->nested.nested_vmx_entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_misc_low,
			vmx->nested.nested_vmx_misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = vmx->nested.nested_vmx_cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = vmx->nested.nested_vmx_cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = vmx->nested.nested_vmx_cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = vmx->nested.nested_vmx_cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = vmx->nested.nested_vmx_vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			vmx->nested.nested_vmx_secondary_ctls_low,
			vmx->nested.nested_vmx_secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		/* EPT caps in the low half, VPID caps in the high half. */
		*pdata = vmx->nested.nested_vmx_ept_caps |
			((u64)vmx->nested.nested_vmx_vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = vmx->nested.nested_vmx_vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}
3202
3203 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
3204                                                  uint64_t val)
3205 {
3206         uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
3207
3208         return !(val & ~valid_bits);
3209 }
3210
/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr;

	switch (msr_info->index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		msr_info->data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		msr_info->data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_KERNEL_GS_BASE:
		/*
		 * Reload host state first so that the cached
		 * msr_guest_kernel_gs_base is current before it is read
		 * (vmx_load_host_state() refreshes it — defined elsewhere).
		 */
		vmx_load_host_state(vmx);
		msr_info->data = vmx->msr_guest_kernel_gs_base;
		break;
#endif
	case MSR_EFER:
		/* EFER is handled entirely by the common x86 code. */
		return kvm_get_msr_common(vcpu, msr_info);
	case MSR_IA32_TSC:
		msr_info->data = guest_read_tsc(vcpu);
		break;
	case MSR_IA32_SYSENTER_CS:
		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	case MSR_IA32_BNDCFGS:
		/*
		 * MPX-only MSR: reject unless MPX is supported and either the
		 * access is host-initiated or the guest CPUID exposes MPX.
		 */
		if (!kvm_mpx_supported() ||
		    (!msr_info->host_initiated &&
		     !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
			return 1;
		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
		break;
	case MSR_IA32_MCG_EXT_CTL:
		/*
		 * Readable by the guest only after LMCE has been enabled in
		 * IA32_FEATURE_CONTROL; the host may always read it.
		 */
		if (!msr_info->host_initiated &&
		    !(vmx->msr_ia32_feature_control &
		      FEATURE_CONTROL_LMCE))
			return 1;
		msr_info->data = vcpu->arch.mcg_ext_ctl;
		break;
	case MSR_IA32_FEATURE_CONTROL:
		msr_info->data = vmx->msr_ia32_feature_control;
		break;
	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
		/* Nested VMX capability MSRs, valid only with nesting. */
		if (!nested_vmx_allowed(vcpu))
			return 1;
		return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
	case MSR_IA32_XSS:
		if (!vmx_xsaves_supported())
			return 1;
		msr_info->data = vcpu->arch.ia32_xss;
		break;
	case MSR_TSC_AUX:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
			return 1;
		/* Otherwise falls through */
	default:
		/* Try the shared user-return MSRs, then the common handler. */
		msr = find_msr_entry(vmx, msr_info->index);
		if (msr) {
			msr_info->data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_info);
	}

	return 0;
}
3290
3291 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
3292
/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
3298 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3299 {
3300         struct vcpu_vmx *vmx = to_vmx(vcpu);
3301         struct shared_msr_entry *msr;
3302         int ret = 0;
3303         u32 msr_index = msr_info->index;
3304         u64 data = msr_info->data;
3305
3306         switch (msr_index) {
3307         case MSR_EFER:
3308                 ret = kvm_set_msr_common(vcpu, msr_info);
3309                 break;
3310 #ifdef CONFIG_X86_64
3311         case MSR_FS_BASE:
3312                 vmx_segment_cache_clear(vmx);
3313                 vmcs_writel(GUEST_FS_BASE, data);
3314                 break;
3315         case MSR_GS_BASE:
3316                 vmx_segment_cache_clear(vmx);
3317                 vmcs_writel(GUEST_GS_BASE, data);
3318                 break;
3319         case MSR_KERNEL_GS_BASE:
3320                 vmx_load_host_state(vmx);
3321                 vmx->msr_guest_kernel_gs_base = data;
3322                 break;
3323 #endif
3324         case MSR_IA32_SYSENTER_CS:
3325                 vmcs_write32(GUEST_SYSENTER_CS, data);
3326                 break;
3327         case MSR_IA32_SYSENTER_EIP:
3328                 vmcs_writel(GUEST_SYSENTER_EIP, data);
3329                 break;
3330         case MSR_IA32_SYSENTER_ESP:
3331                 vmcs_writel(GUEST_SYSENTER_ESP, data);
3332                 break;
3333         case MSR_IA32_BNDCFGS:
3334                 if (!kvm_mpx_supported() ||
3335                     (!msr_info->host_initiated &&
3336                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3337                         return 1;
3338                 if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
3339                     (data & MSR_IA32_BNDCFGS_RSVD))
3340                         return 1;
3341                 vmcs_write64(GUEST_BNDCFGS, data);
3342                 break;
3343         case MSR_IA32_TSC:
3344                 kvm_write_tsc(vcpu, msr_info);
3345                 break;
3346         case MSR_IA32_CR_PAT:
3347                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
3348                         if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3349                                 return 1;
3350                         vmcs_write64(GUEST_IA32_PAT, data);
3351                         vcpu->arch.pat = data;
3352                         break;
3353                 }
3354                 ret = kvm_set_msr_common(vcpu, msr_info);
3355                 break;
3356         case MSR_IA32_TSC_ADJUST:
3357                 ret = kvm_set_msr_common(vcpu, msr_info);
3358                 break;
3359         case MSR_IA32_MCG_EXT_CTL:
3360                 if ((!msr_info->host_initiated &&
3361                      !(to_vmx(vcpu)->msr_ia32_feature_control &
3362                        FEATURE_CONTROL_LMCE)) ||
3363                     (data & ~MCG_EXT_CTL_LMCE_EN))
3364                         return 1;
3365                 vcpu->arch.mcg_ext_ctl = data;
3366                 break;
3367         case MSR_IA32_FEATURE_CONTROL:
3368                 if (!vmx_feature_control_msr_valid(vcpu, data) ||
3369                     (to_vmx(vcpu)->msr_ia32_feature_control &
3370                      FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
3371                         return 1;
3372                 vmx->msr_ia32_feature_control = data;
3373                 if (msr_info->host_initiated && data == 0)
3374                         vmx_leave_nested(vcpu);
3375                 break;
3376         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3377                 if (!msr_info->host_initiated)
3378                         return 1; /* they are read-only */
3379                 if (!nested_vmx_allowed(vcpu))
3380                         return 1;
3381                 return vmx_set_vmx_msr(vcpu, msr_index, data);
3382         case MSR_IA32_XSS:
3383                 if (!vmx_xsaves_supported())
3384                         return 1;
3385                 /*
3386                  * The only supported bit as of Skylake is bit 8, but
3387                  * it is not supported on KVM.
3388                  */
3389                 if (data != 0)
3390                         return 1;
3391                 vcpu->arch.ia32_xss = data;
3392                 if (vcpu->arch.ia32_xss != host_xss)
3393                         add_atomic_switch_msr(vmx, MSR_IA32_XSS,
3394                                 vcpu->arch.ia32_xss, host_xss);
3395                 else
3396                         clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
3397                 break;