1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22 #include "lapic.h"
23
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include <linux/frame.h>
37 #include "kvm_cache_regs.h"
38 #include "x86.h"
39
40 #include <asm/cpu.h>
41 #include <asm/io.h>
42 #include <asm/desc.h>
43 #include <asm/vmx.h>
44 #include <asm/virtext.h>
45 #include <asm/mce.h>
46 #include <asm/fpu/internal.h>
47 #include <asm/perf_event.h>
48 #include <asm/debugreg.h>
49 #include <asm/kexec.h>
50 #include <asm/apic.h>
51 #include <asm/irq_remapping.h>
52 #include <asm/mmu_context.h>
53 #include <asm/nospec-branch.h>
54
55 #include "trace.h"
56 #include "pmu.h"
57
58 #define __ex(x) __kvm_handle_fault_on_reboot(x)
59 #define __ex_clear(x, reg) \
60         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
61
62 MODULE_AUTHOR("Qumranet");
63 MODULE_LICENSE("GPL");
64
65 static const struct x86_cpu_id vmx_cpu_id[] = {
66         X86_FEATURE_MATCH(X86_FEATURE_VMX),
67         {}
68 };
69 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
70
71 static bool __read_mostly enable_vpid = 1;
72 module_param_named(vpid, enable_vpid, bool, 0444);
73
74 static bool __read_mostly enable_vnmi = 1;
75 module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
76
77 static bool __read_mostly flexpriority_enabled = 1;
78 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
79
80 static bool __read_mostly enable_ept = 1;
81 module_param_named(ept, enable_ept, bool, S_IRUGO);
82
83 static bool __read_mostly enable_unrestricted_guest = 1;
84 module_param_named(unrestricted_guest,
85                         enable_unrestricted_guest, bool, S_IRUGO);
86
87 static bool __read_mostly enable_ept_ad_bits = 1;
88 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
89
90 static bool __read_mostly emulate_invalid_guest_state = true;
91 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
92
93 static bool __read_mostly fasteoi = 1;
94 module_param(fasteoi, bool, S_IRUGO);
95
96 static bool __read_mostly enable_apicv = 1;
97 module_param(enable_apicv, bool, S_IRUGO);
98
99 static bool __read_mostly enable_shadow_vmcs = 1;
100 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
101 /*
102  * If nested=1, nested virtualization is supported, i.e., a guest may use
103  * VMX and act as a hypervisor for its own guests. If nested=0, guests may
104  * not use VMX instructions.
105  */
106 static bool __read_mostly nested = 0;
107 module_param(nested, bool, S_IRUGO);
108
109 static u64 __read_mostly host_xss;
110
111 static bool __read_mostly enable_pml = 1;
112 module_param_named(pml, enable_pml, bool, S_IRUGO);
113
114 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
115
116 /* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
117 static int __read_mostly cpu_preemption_timer_multi;
118 static bool __read_mostly enable_preemption_timer = 1;
119 #ifdef CONFIG_X86_64
120 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
121 #endif
122
123 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
124 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
125 #define KVM_VM_CR0_ALWAYS_ON                                            \
126         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
127 #define KVM_CR4_GUEST_OWNED_BITS                                      \
128         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
129          | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
130
131 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
132 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
133
134 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
135
136 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
137
138 /*
139  * Hyper-V requires all of these, so mark them as supported even though
140  * they are just treated the same as all-context.
141  */
142 #define VMX_VPID_EXTENT_SUPPORTED_MASK          \
143         (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |  \
144         VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |    \
145         VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |    \
146         VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
147
148 /*
149  * These two parameters configure the controls for Pause-Loop Exiting:
150  * ple_gap:    upper bound on the amount of time between two successive
151  *             executions of PAUSE in a loop. It also indicates whether PLE
152  *             is enabled. In testing, this time is usually smaller than 128 cycles.
153  * ple_window: upper bound on the amount of time a guest is allowed to execute
154  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
155  *             less than 2^12 cycles.
156  * Time is measured on a counter that runs at the same rate as the TSC; refer
157  * to SDM volume 3B, sections 21.6.13 and 22.1.3.
158  */
159 #define KVM_VMX_DEFAULT_PLE_GAP           128
160 #define KVM_VMX_DEFAULT_PLE_WINDOW        4096
161 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW   2
162 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0
163 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX    \
164                 INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW
165
166 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
167 module_param(ple_gap, int, S_IRUGO);
168
169 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
170 module_param(ple_window, int, S_IRUGO);
171
172 /* Default doubles per-vcpu window every exit. */
173 static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
174 module_param(ple_window_grow, int, S_IRUGO);
175
176 /* Default resets per-vcpu window every exit to ple_window. */
177 static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
178 module_param(ple_window_shrink, int, S_IRUGO);
179
180 /* Default is to compute the maximum so we can never overflow. */
181 static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
182 static int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
183 module_param(ple_window_max, int, S_IRUGO);
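
/*
 * Editorial sketch (hypothetical helper, not used by the driver): the dynamic
 * per-vcpu PLE window described above grows on every PAUSE-loop exit and is
 * clamped by ple_window_actual_max so that the arithmetic can never overflow.
 * Roughly:
 */
static inline int __example_grow_ple_window(int old)
{
	/* A grow factor below 1 disables growth and resets to the default. */
	if (ple_window_grow < 1)
		return ple_window;
	/* Small factors multiply the window, large values act as an increment. */
	if (ple_window_grow < ple_window)
		return min(old * ple_window_grow, ple_window_actual_max);
	return min(old + ple_window_grow, ple_window_actual_max);
}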
184
185 extern const ulong vmx_return;
186
187 #define NR_AUTOLOAD_MSRS 8
188
189 struct vmcs {
190         u32 revision_id;
191         u32 abort;
192         char data[0];
193 };
194
195 /*
196  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
197  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
198  * loaded on this CPU (so we can clear them if the CPU goes down).
199  */
200 struct loaded_vmcs {
201         struct vmcs *vmcs;
202         struct vmcs *shadow_vmcs;
203         int cpu;
204         bool launched;
205         bool nmi_known_unmasked;
206         unsigned long vmcs_host_cr3;    /* May not match real cr3 */
207         unsigned long vmcs_host_cr4;    /* May not match real cr4 */
208         /* Support for vnmi-less CPUs */
209         int soft_vnmi_blocked;
210         ktime_t entry_time;
211         s64 vnmi_blocked_time;
212         struct list_head loaded_vmcss_on_cpu_link;
213 };
214
215 struct shared_msr_entry {
216         unsigned index;
217         u64 data;
218         u64 mask;
219 };
220
221 /*
222  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
223  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
224  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
225  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
226  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
227  * More than one of these structures may exist, if L1 runs multiple L2 guests.
228  * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
229  * underlying hardware which will be used to run L2.
230  * This structure is packed to ensure that its layout is identical across
231  * machines (necessary for live migration).
232  * If there are changes in this struct, VMCS12_REVISION must be changed.
233  */
234 typedef u64 natural_width;
235 struct __packed vmcs12 {
236         /* According to the Intel spec, a VMCS region must start with the
237          * following two fields. Then follow implementation-specific data.
238          */
239         u32 revision_id;
240         u32 abort;
241
242         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
243         u32 padding[7]; /* room for future expansion */
244
245         u64 io_bitmap_a;
246         u64 io_bitmap_b;
247         u64 msr_bitmap;
248         u64 vm_exit_msr_store_addr;
249         u64 vm_exit_msr_load_addr;
250         u64 vm_entry_msr_load_addr;
251         u64 tsc_offset;
252         u64 virtual_apic_page_addr;
253         u64 apic_access_addr;
254         u64 posted_intr_desc_addr;
255         u64 vm_function_control;
256         u64 ept_pointer;
257         u64 eoi_exit_bitmap0;
258         u64 eoi_exit_bitmap1;
259         u64 eoi_exit_bitmap2;
260         u64 eoi_exit_bitmap3;
261         u64 eptp_list_address;
262         u64 xss_exit_bitmap;
263         u64 guest_physical_address;
264         u64 vmcs_link_pointer;
265         u64 pml_address;
266         u64 guest_ia32_debugctl;
267         u64 guest_ia32_pat;
268         u64 guest_ia32_efer;
269         u64 guest_ia32_perf_global_ctrl;
270         u64 guest_pdptr0;
271         u64 guest_pdptr1;
272         u64 guest_pdptr2;
273         u64 guest_pdptr3;
274         u64 guest_bndcfgs;
275         u64 host_ia32_pat;
276         u64 host_ia32_efer;
277         u64 host_ia32_perf_global_ctrl;
278         u64 padding64[8]; /* room for future expansion */
279         /*
280          * To allow migration of L1 (complete with its L2 guests) between
281          * machines of different natural widths (32 or 64 bit), we cannot have
282          * unsigned long fields with no explicit size. We use u64 (aliased
283          * natural_width) instead. Luckily, x86 is little-endian.
284          */
285         natural_width cr0_guest_host_mask;
286         natural_width cr4_guest_host_mask;
287         natural_width cr0_read_shadow;
288         natural_width cr4_read_shadow;
289         natural_width cr3_target_value0;
290         natural_width cr3_target_value1;
291         natural_width cr3_target_value2;
292         natural_width cr3_target_value3;
293         natural_width exit_qualification;
294         natural_width guest_linear_address;
295         natural_width guest_cr0;
296         natural_width guest_cr3;
297         natural_width guest_cr4;
298         natural_width guest_es_base;
299         natural_width guest_cs_base;
300         natural_width guest_ss_base;
301         natural_width guest_ds_base;
302         natural_width guest_fs_base;
303         natural_width guest_gs_base;
304         natural_width guest_ldtr_base;
305         natural_width guest_tr_base;
306         natural_width guest_gdtr_base;
307         natural_width guest_idtr_base;
308         natural_width guest_dr7;
309         natural_width guest_rsp;
310         natural_width guest_rip;
311         natural_width guest_rflags;
312         natural_width guest_pending_dbg_exceptions;
313         natural_width guest_sysenter_esp;
314         natural_width guest_sysenter_eip;
315         natural_width host_cr0;
316         natural_width host_cr3;
317         natural_width host_cr4;
318         natural_width host_fs_base;
319         natural_width host_gs_base;
320         natural_width host_tr_base;
321         natural_width host_gdtr_base;
322         natural_width host_idtr_base;
323         natural_width host_ia32_sysenter_esp;
324         natural_width host_ia32_sysenter_eip;
325         natural_width host_rsp;
326         natural_width host_rip;
327         natural_width paddingl[8]; /* room for future expansion */
328         u32 pin_based_vm_exec_control;
329         u32 cpu_based_vm_exec_control;
330         u32 exception_bitmap;
331         u32 page_fault_error_code_mask;
332         u32 page_fault_error_code_match;
333         u32 cr3_target_count;
334         u32 vm_exit_controls;
335         u32 vm_exit_msr_store_count;
336         u32 vm_exit_msr_load_count;
337         u32 vm_entry_controls;
338         u32 vm_entry_msr_load_count;
339         u32 vm_entry_intr_info_field;
340         u32 vm_entry_exception_error_code;
341         u32 vm_entry_instruction_len;
342         u32 tpr_threshold;
343         u32 secondary_vm_exec_control;
344         u32 vm_instruction_error;
345         u32 vm_exit_reason;
346         u32 vm_exit_intr_info;
347         u32 vm_exit_intr_error_code;
348         u32 idt_vectoring_info_field;
349         u32 idt_vectoring_error_code;
350         u32 vm_exit_instruction_len;
351         u32 vmx_instruction_info;
352         u32 guest_es_limit;
353         u32 guest_cs_limit;
354         u32 guest_ss_limit;
355         u32 guest_ds_limit;
356         u32 guest_fs_limit;
357         u32 guest_gs_limit;
358         u32 guest_ldtr_limit;
359         u32 guest_tr_limit;
360         u32 guest_gdtr_limit;
361         u32 guest_idtr_limit;
362         u32 guest_es_ar_bytes;
363         u32 guest_cs_ar_bytes;
364         u32 guest_ss_ar_bytes;
365         u32 guest_ds_ar_bytes;
366         u32 guest_fs_ar_bytes;
367         u32 guest_gs_ar_bytes;
368         u32 guest_ldtr_ar_bytes;
369         u32 guest_tr_ar_bytes;
370         u32 guest_interruptibility_info;
371         u32 guest_activity_state;
372         u32 guest_sysenter_cs;
373         u32 host_ia32_sysenter_cs;
374         u32 vmx_preemption_timer_value;
375         u32 padding32[7]; /* room for future expansion */
376         u16 virtual_processor_id;
377         u16 posted_intr_nv;
378         u16 guest_es_selector;
379         u16 guest_cs_selector;
380         u16 guest_ss_selector;
381         u16 guest_ds_selector;
382         u16 guest_fs_selector;
383         u16 guest_gs_selector;
384         u16 guest_ldtr_selector;
385         u16 guest_tr_selector;
386         u16 guest_intr_status;
387         u16 guest_pml_index;
388         u16 host_es_selector;
389         u16 host_cs_selector;
390         u16 host_ss_selector;
391         u16 host_ds_selector;
392         u16 host_fs_selector;
393         u16 host_gs_selector;
394         u16 host_tr_selector;
395 };
396
397 /*
398  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
399  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
400  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
401  */
402 #define VMCS12_REVISION 0x11e57ed0
403
404 /*
405  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
406  * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
407  * the current implementation, 4K is reserved to avoid future complications.
408  */
409 #define VMCS12_SIZE 0x1000
410
411 /*
412  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
413  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
414  */
415 struct nested_vmx {
416         /* Has the level1 guest done vmxon? */
417         bool vmxon;
418         gpa_t vmxon_ptr;
419         bool pml_full;
420
421         /* The guest-physical address of the current VMCS L1 keeps for L2 */
422         gpa_t current_vmptr;
423         /*
424          * Cache of the guest's VMCS, existing outside of guest memory.
425          * Loaded from guest memory during VMPTRLD. Flushed to guest
426          * memory during VMCLEAR and VMPTRLD.
427          */
428         struct vmcs12 *cached_vmcs12;
429         /*
430          * Indicates whether the shadow vmcs must be updated with the
431          * data held by vmcs12.
432          */
433         bool sync_shadow_vmcs;
434
435         bool change_vmcs01_virtual_x2apic_mode;
436         /* L2 must run next, and mustn't decide to exit to L1. */
437         bool nested_run_pending;
438
439         struct loaded_vmcs vmcs02;
440
441         /*
442          * Guest pages referred to in the vmcs02 with host-physical
443          * pointers, so we must keep them pinned while L2 runs.
444          */
445         struct page *apic_access_page;
446         struct page *virtual_apic_page;
447         struct page *pi_desc_page;
448         struct pi_desc *pi_desc;
449         bool pi_pending;
450         u16 posted_intr_nv;
451
452         unsigned long *msr_bitmap;
453
454         struct hrtimer preemption_timer;
455         bool preemption_timer_expired;
456
457         /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
458         u64 vmcs01_debugctl;
459
460         u16 vpid02;
461         u16 last_vpid;
462
463         /*
464          * We only store the "true" versions of the VMX capability MSRs. We
465          * generate the "non-true" versions by setting the must-be-1 bits
466          * according to the SDM.
467          */
468         u32 nested_vmx_procbased_ctls_low;
469         u32 nested_vmx_procbased_ctls_high;
470         u32 nested_vmx_secondary_ctls_low;
471         u32 nested_vmx_secondary_ctls_high;
472         u32 nested_vmx_pinbased_ctls_low;
473         u32 nested_vmx_pinbased_ctls_high;
474         u32 nested_vmx_exit_ctls_low;
475         u32 nested_vmx_exit_ctls_high;
476         u32 nested_vmx_entry_ctls_low;
477         u32 nested_vmx_entry_ctls_high;
478         u32 nested_vmx_misc_low;
479         u32 nested_vmx_misc_high;
480         u32 nested_vmx_ept_caps;
481         u32 nested_vmx_vpid_caps;
482         u64 nested_vmx_basic;
483         u64 nested_vmx_cr0_fixed0;
484         u64 nested_vmx_cr0_fixed1;
485         u64 nested_vmx_cr4_fixed0;
486         u64 nested_vmx_cr4_fixed1;
487         u64 nested_vmx_vmcs_enum;
488         u64 nested_vmx_vmfunc_controls;
489
490         /* SMM related state */
491         struct {
492                 /* in VMX operation on SMM entry? */
493                 bool vmxon;
494                 /* in guest mode on SMM entry? */
495                 bool guest_mode;
496         } smm;
497 };
498
499 #define POSTED_INTR_ON  0
500 #define POSTED_INTR_SN  1
501
502 /* Posted-Interrupt Descriptor */
503 struct pi_desc {
504         u32 pir[8];     /* Posted interrupt requested */
505         union {
506                 struct {
507                                 /* bit 256 - Outstanding Notification */
508                         u16     on      : 1,
509                                 /* bit 257 - Suppress Notification */
510                                 sn      : 1,
511                                 /* bit 271:258 - Reserved */
512                                 rsvd_1  : 14;
513                                 /* bit 279:272 - Notification Vector */
514                         u8      nv;
515                                 /* bit 287:280 - Reserved */
516                         u8      rsvd_2;
517                                 /* bit 319:288 - Notification Destination */
518                         u32     ndst;
519                 };
520                 u64 control;
521         };
522         u32 rsvd[6];
523 } __aligned(64);
524
525 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
526 {
527         return test_and_set_bit(POSTED_INTR_ON,
528                         (unsigned long *)&pi_desc->control);
529 }
530
531 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
532 {
533         return test_and_clear_bit(POSTED_INTR_ON,
534                         (unsigned long *)&pi_desc->control);
535 }
536
537 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
538 {
539         return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
540 }
541
542 static inline void pi_clear_sn(struct pi_desc *pi_desc)
543 {
544         return clear_bit(POSTED_INTR_SN,
545                         (unsigned long *)&pi_desc->control);
546 }
547
548 static inline void pi_set_sn(struct pi_desc *pi_desc)
549 {
550         return set_bit(POSTED_INTR_SN,
551                         (unsigned long *)&pi_desc->control);
552 }
553
554 static inline void pi_clear_on(struct pi_desc *pi_desc)
555 {
556         clear_bit(POSTED_INTR_ON,
557                   (unsigned long *)&pi_desc->control);
558 }
559
560 static inline int pi_test_on(struct pi_desc *pi_desc)
561 {
562         return test_bit(POSTED_INTR_ON,
563                         (unsigned long *)&pi_desc->control);
564 }
565
566 static inline int pi_test_sn(struct pi_desc *pi_desc)
567 {
568         return test_bit(POSTED_INTR_SN,
569                         (unsigned long *)&pi_desc->control);
570 }
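
/*
 * Editorial sketch (hypothetical helper, not part of the driver): posting an
 * interrupt into the descriptor above is a lock-free, two-step sequence:
 * set the vector bit in PIR, then set ON. Only the poster that flips ON from
 * 0 to 1 needs to send the notification IPI; the real delivery paths later
 * in this file follow the same pattern.
 */
static inline bool __example_pi_post(struct pi_desc *pi_desc, int vector)
{
	/* Record the vector; hardware or a later sync will consume it. */
	pi_test_and_set_pir(vector, pi_desc);
	/* Return true when a notification IPI still needs to be sent. */
	return !pi_test_and_set_on(pi_desc);
}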
571
572 struct vcpu_vmx {
573         struct kvm_vcpu       vcpu;
574         unsigned long         host_rsp;
575         u8                    fail;
576         u32                   exit_intr_info;
577         u32                   idt_vectoring_info;
578         ulong                 rflags;
579         struct shared_msr_entry *guest_msrs;
580         int                   nmsrs;
581         int                   save_nmsrs;
582         unsigned long         host_idt_base;
583 #ifdef CONFIG_X86_64
584         u64                   msr_host_kernel_gs_base;
585         u64                   msr_guest_kernel_gs_base;
586 #endif
587         u32 vm_entry_controls_shadow;
588         u32 vm_exit_controls_shadow;
589         u32 secondary_exec_control;
590
591         /*
592          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
593          * non-nested (L1) guest, it always points to vmcs01. For a nested
594          * guest (L2), it points to a different VMCS.
595          */
596         struct loaded_vmcs    vmcs01;
597         struct loaded_vmcs   *loaded_vmcs;
598         bool                  __launched; /* temporary, used in vmx_vcpu_run */
599         struct msr_autoload {
600                 unsigned nr;
601                 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
602                 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
603         } msr_autoload;
604         struct {
605                 int           loaded;
606                 u16           fs_sel, gs_sel, ldt_sel;
607 #ifdef CONFIG_X86_64
608                 u16           ds_sel, es_sel;
609 #endif
610                 int           gs_ldt_reload_needed;
611                 int           fs_reload_needed;
612                 u64           msr_host_bndcfgs;
613         } host_state;
614         struct {
615                 int vm86_active;
616                 ulong save_rflags;
617                 struct kvm_segment segs[8];
618         } rmode;
619         struct {
620                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
621                 struct kvm_save_segment {
622                         u16 selector;
623                         unsigned long base;
624                         u32 limit;
625                         u32 ar;
626                 } seg[8];
627         } segment_cache;
628         int vpid;
629         bool emulation_required;
630
631         u32 exit_reason;
632
633         /* Posted interrupt descriptor */
634         struct pi_desc pi_desc;
635
636         /* Support for a guest hypervisor (nested VMX) */
637         struct nested_vmx nested;
638
639         /* Dynamic PLE window. */
640         int ple_window;
641         bool ple_window_dirty;
642
643         /* Support for PML */
644 #define PML_ENTITY_NUM          512
645         struct page *pml_pg;
646
647         /* apic deadline value in host tsc */
648         u64 hv_deadline_tsc;
649
650         u64 current_tsc_ratio;
651
652         u32 host_pkru;
653
654         /*
655          * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
656          * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
657          * in msr_ia32_feature_control_valid_bits.
658          */
659         u64 msr_ia32_feature_control;
660         u64 msr_ia32_feature_control_valid_bits;
661 };
662
663 enum segment_cache_field {
664         SEG_FIELD_SEL = 0,
665         SEG_FIELD_BASE = 1,
666         SEG_FIELD_LIMIT = 2,
667         SEG_FIELD_AR = 3,
668
669         SEG_FIELD_NR = 4
670 };
671
672 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
673 {
674         return container_of(vcpu, struct vcpu_vmx, vcpu);
675 }
676
677 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
678 {
679         return &(to_vmx(vcpu)->pi_desc);
680 }
681
682 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
683 #define FIELD(number, name)     [number] = VMCS12_OFFSET(name)
684 #define FIELD64(number, name)   [number] = VMCS12_OFFSET(name), \
685                                 [number##_HIGH] = VMCS12_OFFSET(name)+4
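/*
 * Editorial note: for example, FIELD64(TSC_OFFSET, tsc_offset) expands into
 * two table entries,
 *
 *	[TSC_OFFSET]      = offsetof(struct vmcs12, tsc_offset),
 *	[TSC_OFFSET_HIGH] = offsetof(struct vmcs12, tsc_offset) + 4,
 *
 * so an access to the "high" encoding of a 64-bit field lands in the upper
 * four bytes of the same vmcs12 member.
 */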
686
687
688 static unsigned long shadow_read_only_fields[] = {
689         /*
690          * We do NOT shadow fields that are modified when L0
691          * traps and emulates any vmx instruction (e.g. VMPTRLD,
692          * VMXON...) executed by L1.
693          * For example, VM_INSTRUCTION_ERROR is read
694          * by L1 if a vmx instruction fails (part of the error path).
695          * Note the code assumes this logic. If for some reason
696          * we start shadowing these fields then we need to
697          * force a shadow sync when L0 emulates vmx instructions
698          * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
699          * by nested_vmx_failValid)
700          */
701         VM_EXIT_REASON,
702         VM_EXIT_INTR_INFO,
703         VM_EXIT_INSTRUCTION_LEN,
704         IDT_VECTORING_INFO_FIELD,
705         IDT_VECTORING_ERROR_CODE,
706         VM_EXIT_INTR_ERROR_CODE,
707         EXIT_QUALIFICATION,
708         GUEST_LINEAR_ADDRESS,
709         GUEST_PHYSICAL_ADDRESS
710 };
711 static int max_shadow_read_only_fields =
712         ARRAY_SIZE(shadow_read_only_fields);
713
714 static unsigned long shadow_read_write_fields[] = {
715         TPR_THRESHOLD,
716         GUEST_RIP,
717         GUEST_RSP,
718         GUEST_CR0,
719         GUEST_CR3,
720         GUEST_CR4,
721         GUEST_INTERRUPTIBILITY_INFO,
722         GUEST_RFLAGS,
723         GUEST_CS_SELECTOR,
724         GUEST_CS_AR_BYTES,
725         GUEST_CS_LIMIT,
726         GUEST_CS_BASE,
727         GUEST_ES_BASE,
728         GUEST_BNDCFGS,
729         CR0_GUEST_HOST_MASK,
730         CR0_READ_SHADOW,
731         CR4_READ_SHADOW,
732         TSC_OFFSET,
733         EXCEPTION_BITMAP,
734         CPU_BASED_VM_EXEC_CONTROL,
735         VM_ENTRY_EXCEPTION_ERROR_CODE,
736         VM_ENTRY_INTR_INFO_FIELD,
737         VM_ENTRY_INSTRUCTION_LEN,
739         HOST_FS_BASE,
740         HOST_GS_BASE,
741         HOST_FS_SELECTOR,
742         HOST_GS_SELECTOR
743 };
744 static int max_shadow_read_write_fields =
745         ARRAY_SIZE(shadow_read_write_fields);
746
747 static const unsigned short vmcs_field_to_offset_table[] = {
748         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
749         FIELD(POSTED_INTR_NV, posted_intr_nv),
750         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
751         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
752         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
753         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
754         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
755         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
756         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
757         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
758         FIELD(GUEST_INTR_STATUS, guest_intr_status),
759         FIELD(GUEST_PML_INDEX, guest_pml_index),
760         FIELD(HOST_ES_SELECTOR, host_es_selector),
761         FIELD(HOST_CS_SELECTOR, host_cs_selector),
762         FIELD(HOST_SS_SELECTOR, host_ss_selector),
763         FIELD(HOST_DS_SELECTOR, host_ds_selector),
764         FIELD(HOST_FS_SELECTOR, host_fs_selector),
765         FIELD(HOST_GS_SELECTOR, host_gs_selector),
766         FIELD(HOST_TR_SELECTOR, host_tr_selector),
767         FIELD64(IO_BITMAP_A, io_bitmap_a),
768         FIELD64(IO_BITMAP_B, io_bitmap_b),
769         FIELD64(MSR_BITMAP, msr_bitmap),
770         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
771         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
772         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
773         FIELD64(TSC_OFFSET, tsc_offset),
774         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
775         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
776         FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
777         FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
778         FIELD64(EPT_POINTER, ept_pointer),
779         FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
780         FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
781         FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
782         FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
783         FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
784         FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
785         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
786         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
787         FIELD64(PML_ADDRESS, pml_address),
788         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
789         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
790         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
791         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
792         FIELD64(GUEST_PDPTR0, guest_pdptr0),
793         FIELD64(GUEST_PDPTR1, guest_pdptr1),
794         FIELD64(GUEST_PDPTR2, guest_pdptr2),
795         FIELD64(GUEST_PDPTR3, guest_pdptr3),
796         FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
797         FIELD64(HOST_IA32_PAT, host_ia32_pat),
798         FIELD64(HOST_IA32_EFER, host_ia32_efer),
799         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
800         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
801         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
802         FIELD(EXCEPTION_BITMAP, exception_bitmap),
803         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
804         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
805         FIELD(CR3_TARGET_COUNT, cr3_target_count),
806         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
807         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
808         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
809         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
810         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
811         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
812         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
813         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
814         FIELD(TPR_THRESHOLD, tpr_threshold),
815         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
816         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
817         FIELD(VM_EXIT_REASON, vm_exit_reason),
818         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
819         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
820         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
821         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
822         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
823         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
824         FIELD(GUEST_ES_LIMIT, guest_es_limit),
825         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
826         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
827         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
828         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
829         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
830         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
831         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
832         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
833         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
834         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
835         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
836         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
837         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
838         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
839         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
840         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
841         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
842         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
843         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
844         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
845         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
846         FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
847         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
848         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
849         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
850         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
851         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
852         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
853         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
854         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
855         FIELD(EXIT_QUALIFICATION, exit_qualification),
856         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
857         FIELD(GUEST_CR0, guest_cr0),
858         FIELD(GUEST_CR3, guest_cr3),
859         FIELD(GUEST_CR4, guest_cr4),
860         FIELD(GUEST_ES_BASE, guest_es_base),
861         FIELD(GUEST_CS_BASE, guest_cs_base),
862         FIELD(GUEST_SS_BASE, guest_ss_base),
863         FIELD(GUEST_DS_BASE, guest_ds_base),
864         FIELD(GUEST_FS_BASE, guest_fs_base),
865         FIELD(GUEST_GS_BASE, guest_gs_base),
866         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
867         FIELD(GUEST_TR_BASE, guest_tr_base),
868         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
869         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
870         FIELD(GUEST_DR7, guest_dr7),
871         FIELD(GUEST_RSP, guest_rsp),
872         FIELD(GUEST_RIP, guest_rip),
873         FIELD(GUEST_RFLAGS, guest_rflags),
874         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
875         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
876         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
877         FIELD(HOST_CR0, host_cr0),
878         FIELD(HOST_CR3, host_cr3),
879         FIELD(HOST_CR4, host_cr4),
880         FIELD(HOST_FS_BASE, host_fs_base),
881         FIELD(HOST_GS_BASE, host_gs_base),
882         FIELD(HOST_TR_BASE, host_tr_base),
883         FIELD(HOST_GDTR_BASE, host_gdtr_base),
884         FIELD(HOST_IDTR_BASE, host_idtr_base),
885         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
886         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
887         FIELD(HOST_RSP, host_rsp),
888         FIELD(HOST_RIP, host_rip),
889 };
890
891 static inline short vmcs_field_to_offset(unsigned long field)
892 {
893         BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
894
895         if (field >= ARRAY_SIZE(vmcs_field_to_offset_table))
896                 return -ENOENT;
897
898         /*
899          * FIXME: Mitigation for CVE-2017-5753.  To be replaced with a
900          * generic mechanism.
901          */
902         asm("lfence");
903
904         if (vmcs_field_to_offset_table[field] == 0)
905                 return -ENOENT;
906
907         return vmcs_field_to_offset_table[field];
908 }
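
/*
 * Editorial note: the lfence above keeps the CPU from speculatively indexing
 * the table with an out-of-bounds 'field' before the range check has retired,
 * closing the Spectre-v1 (CVE-2017-5753) window. The generic helper for this
 * pattern is array_index_nospec() from <linux/nospec.h>, roughly:
 *
 *	field = array_index_nospec(field,
 *				   ARRAY_SIZE(vmcs_field_to_offset_table));
 *
 * which clamps the index without a serializing instruction.
 */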
909
910 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
911 {
912         return to_vmx(vcpu)->nested.cached_vmcs12;
913 }
914
915 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
916 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
917 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
918 static bool vmx_xsaves_supported(void);
919 static void vmx_set_segment(struct kvm_vcpu *vcpu,
920                             struct kvm_segment *var, int seg);
921 static void vmx_get_segment(struct kvm_vcpu *vcpu,
922                             struct kvm_segment *var, int seg);
923 static bool guest_state_valid(struct kvm_vcpu *vcpu);
924 static u32 vmx_segment_access_rights(struct kvm_segment *var);
925 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
926 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
927 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
928 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
929                                             u16 error_code);
930
931 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
932 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
933 /*
934  * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
935  * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
936  */
937 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
938
939 /*
940  * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
941  * can find which vCPU should be woken up.
942  */
943 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
944 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
945
946 enum {
947         VMX_IO_BITMAP_A,
948         VMX_IO_BITMAP_B,
949         VMX_MSR_BITMAP_LEGACY,
950         VMX_MSR_BITMAP_LONGMODE,
951         VMX_MSR_BITMAP_LEGACY_X2APIC_APICV,
952         VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV,
953         VMX_MSR_BITMAP_LEGACY_X2APIC,
954         VMX_MSR_BITMAP_LONGMODE_X2APIC,
955         VMX_VMREAD_BITMAP,
956         VMX_VMWRITE_BITMAP,
957         VMX_BITMAP_NR
958 };
959
960 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
961
962 #define vmx_io_bitmap_a                      (vmx_bitmap[VMX_IO_BITMAP_A])
963 #define vmx_io_bitmap_b                      (vmx_bitmap[VMX_IO_BITMAP_B])
964 #define vmx_msr_bitmap_legacy                (vmx_bitmap[VMX_MSR_BITMAP_LEGACY])
965 #define vmx_msr_bitmap_longmode              (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE])
966 #define vmx_msr_bitmap_legacy_x2apic_apicv   (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC_APICV])
967 #define vmx_msr_bitmap_longmode_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV])
968 #define vmx_msr_bitmap_legacy_x2apic         (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC])
969 #define vmx_msr_bitmap_longmode_x2apic       (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC])
970 #define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
971 #define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
972
973 static bool cpu_has_load_ia32_efer;
974 static bool cpu_has_load_perf_global_ctrl;
975
976 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
977 static DEFINE_SPINLOCK(vmx_vpid_lock);
978
979 static struct vmcs_config {
980         int size;
981         int order;
982         u32 basic_cap;
983         u32 revision_id;
984         u32 pin_based_exec_ctrl;
985         u32 cpu_based_exec_ctrl;
986         u32 cpu_based_2nd_exec_ctrl;
987         u32 vmexit_ctrl;
988         u32 vmentry_ctrl;
989 } vmcs_config;
990
991 static struct vmx_capability {
992         u32 ept;
993         u32 vpid;
994 } vmx_capability;
995
996 #define VMX_SEGMENT_FIELD(seg)                                  \
997         [VCPU_SREG_##seg] = {                                   \
998                 .selector = GUEST_##seg##_SELECTOR,             \
999                 .base = GUEST_##seg##_BASE,                     \
1000                 .limit = GUEST_##seg##_LIMIT,                   \
1001                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
1002         }
1003
1004 static const struct kvm_vmx_segment_field {
1005         unsigned selector;
1006         unsigned base;
1007         unsigned limit;
1008         unsigned ar_bytes;
1009 } kvm_vmx_segment_fields[] = {
1010         VMX_SEGMENT_FIELD(CS),
1011         VMX_SEGMENT_FIELD(DS),
1012         VMX_SEGMENT_FIELD(ES),
1013         VMX_SEGMENT_FIELD(FS),
1014         VMX_SEGMENT_FIELD(GS),
1015         VMX_SEGMENT_FIELD(SS),
1016         VMX_SEGMENT_FIELD(TR),
1017         VMX_SEGMENT_FIELD(LDTR),
1018 };
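
/*
 * Editorial note: for example, VMX_SEGMENT_FIELD(CS) expands to
 *
 *	[VCPU_SREG_CS] = {
 *		.selector = GUEST_CS_SELECTOR,
 *		.base     = GUEST_CS_BASE,
 *		.limit    = GUEST_CS_LIMIT,
 *		.ar_bytes = GUEST_CS_AR_BYTES,
 *	},
 *
 * so each kvm_segment register is tied to its four VMCS field encodings.
 */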
1019
1020 static u64 host_efer;
1021
1022 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1023
1024 /*
1025  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
1026  * away by decrementing the array size.
1027  */
1028 static const u32 vmx_msr_index[] = {
1029 #ifdef CONFIG_X86_64
1030         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
1031 #endif
1032         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
1033 };
1034
1035 static inline bool is_exception_n(u32 intr_info, u8 vector)
1036 {
1037         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1038                              INTR_INFO_VALID_MASK)) ==
1039                 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1040 }
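
/*
 * Editorial note: VM-exit interruption information packs the vector in
 * bits 7:0, the event type in bits 10:8 and the valid flag in bit 31, so a
 * single masked comparison (as above) is enough to recognize a specific
 * hardware exception.
 */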
1041
1042 static inline bool is_debug(u32 intr_info)
1043 {
1044         return is_exception_n(intr_info, DB_VECTOR);
1045 }
1046
1047 static inline bool is_breakpoint(u32 intr_info)
1048 {
1049         return is_exception_n(intr_info, BP_VECTOR);
1050 }
1051
1052 static inline bool is_page_fault(u32 intr_info)
1053 {
1054         return is_exception_n(intr_info, PF_VECTOR);
1055 }
1056
1057 static inline bool is_no_device(u32 intr_info)
1058 {
1059         return is_exception_n(intr_info, NM_VECTOR);
1060 }
1061
1062 static inline bool is_invalid_opcode(u32 intr_info)
1063 {
1064         return is_exception_n(intr_info, UD_VECTOR);
1065 }
1066
1067 static inline bool is_external_interrupt(u32 intr_info)
1068 {
1069         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1070                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1071 }
1072
1073 static inline bool is_machine_check(u32 intr_info)
1074 {
1075         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1076                              INTR_INFO_VALID_MASK)) ==
1077                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
1078 }
1079
1080 static inline bool cpu_has_vmx_msr_bitmap(void)
1081 {
1082         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
1083 }
1084
1085 static inline bool cpu_has_vmx_tpr_shadow(void)
1086 {
1087         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
1088 }
1089
1090 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
1091 {
1092         return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
1093 }
1094
1095 static inline bool cpu_has_secondary_exec_ctrls(void)
1096 {
1097         return vmcs_config.cpu_based_exec_ctrl &
1098                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1099 }
1100
1101 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
1102 {
1103         return vmcs_config.cpu_based_2nd_exec_ctrl &
1104                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1105 }
1106
1107 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1108 {
1109         return vmcs_config.cpu_based_2nd_exec_ctrl &
1110                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1111 }
1112
1113 static inline bool cpu_has_vmx_apic_register_virt(void)
1114 {
1115         return vmcs_config.cpu_based_2nd_exec_ctrl &
1116                 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1117 }
1118
1119 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1120 {
1121         return vmcs_config.cpu_based_2nd_exec_ctrl &
1122                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1123 }
1124
1125 /*
1126  * Comment format: document - errata name - stepping - processor name.
1127  * Taken from
1128  * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1129  */
1130 static u32 vmx_preemption_cpu_tfms[] = {
1131 /* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
1132 0x000206E6,
1133 /* 323056.pdf - AAX65  - C2 - Xeon L3406 */
1134 /* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1135 /* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1136 0x00020652,
1137 /* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1138 0x00020655,
1139 /* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
1140 /* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
1141 /*
1142  * 320767.pdf - AAP86  - B1 -
1143  * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1144  */
1145 0x000106E5,
1146 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1147 0x000106A0,
1148 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1149 0x000106A1,
1150 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1151 0x000106A4,
1152  /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1153  /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1154  /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1155 0x000106A5,
1156 };
1157
1158 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1159 {
1160         u32 eax = cpuid_eax(0x00000001), i;
1161
1162         /* Clear the reserved bits */
1163         eax &= ~(0x3U << 14 | 0xfU << 28);
1164         for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
1165                 if (eax == vmx_preemption_cpu_tfms[i])
1166                         return true;
1167
1168         return false;
1169 }
1170
1171 static inline bool cpu_has_vmx_preemption_timer(void)
1172 {
1173         return vmcs_config.pin_based_exec_ctrl &
1174                 PIN_BASED_VMX_PREEMPTION_TIMER;
1175 }
1176
1177 static inline bool cpu_has_vmx_posted_intr(void)
1178 {
1179         return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1180                 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
1181 }
1182
1183 static inline bool cpu_has_vmx_apicv(void)
1184 {
1185         return cpu_has_vmx_apic_register_virt() &&
1186                 cpu_has_vmx_virtual_intr_delivery() &&
1187                 cpu_has_vmx_posted_intr();
1188 }
1189
1190 static inline bool cpu_has_vmx_flexpriority(void)
1191 {
1192         return cpu_has_vmx_tpr_shadow() &&
1193                 cpu_has_vmx_virtualize_apic_accesses();
1194 }
1195
1196 static inline bool cpu_has_vmx_ept_execute_only(void)
1197 {
1198         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
1199 }
1200
1201 static inline bool cpu_has_vmx_ept_2m_page(void)
1202 {
1203         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
1204 }
1205
1206 static inline bool cpu_has_vmx_ept_1g_page(void)
1207 {
1208         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
1209 }
1210
1211 static inline bool cpu_has_vmx_ept_4levels(void)
1212 {
1213         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1214 }
1215
1216 static inline bool cpu_has_vmx_ept_mt_wb(void)
1217 {
1218         return vmx_capability.ept & VMX_EPTP_WB_BIT;
1219 }
1220
1221 static inline bool cpu_has_vmx_ept_5levels(void)
1222 {
1223         return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
1224 }
1225
1226 static inline bool cpu_has_vmx_ept_ad_bits(void)
1227 {
1228         return vmx_capability.ept & VMX_EPT_AD_BIT;
1229 }
1230
1231 static inline bool cpu_has_vmx_invept_context(void)
1232 {
1233         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
1234 }
1235
1236 static inline bool cpu_has_vmx_invept_global(void)
1237 {
1238         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
1239 }
1240
1241 static inline bool cpu_has_vmx_invvpid_single(void)
1242 {
1243         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1244 }
1245
1246 static inline bool cpu_has_vmx_invvpid_global(void)
1247 {
1248         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1249 }
1250
1251 static inline bool cpu_has_vmx_invvpid(void)
1252 {
1253         return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1254 }
1255
1256 static inline bool cpu_has_vmx_ept(void)
1257 {
1258         return vmcs_config.cpu_based_2nd_exec_ctrl &
1259                 SECONDARY_EXEC_ENABLE_EPT;
1260 }
1261
1262 static inline bool cpu_has_vmx_unrestricted_guest(void)
1263 {
1264         return vmcs_config.cpu_based_2nd_exec_ctrl &
1265                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1266 }
1267
1268 static inline bool cpu_has_vmx_ple(void)
1269 {
1270         return vmcs_config.cpu_based_2nd_exec_ctrl &
1271                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1272 }
1273
1274 static inline bool cpu_has_vmx_basic_inout(void)
1275 {
1276         return  (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
1277 }
1278
1279 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
1280 {
1281         return flexpriority_enabled && lapic_in_kernel(vcpu);
1282 }
1283
1284 static inline bool cpu_has_vmx_vpid(void)
1285 {
1286         return vmcs_config.cpu_based_2nd_exec_ctrl &
1287                 SECONDARY_EXEC_ENABLE_VPID;
1288 }
1289
1290 static inline bool cpu_has_vmx_rdtscp(void)
1291 {
1292         return vmcs_config.cpu_based_2nd_exec_ctrl &
1293                 SECONDARY_EXEC_RDTSCP;
1294 }
1295
1296 static inline bool cpu_has_vmx_invpcid(void)
1297 {
1298         return vmcs_config.cpu_based_2nd_exec_ctrl &
1299                 SECONDARY_EXEC_ENABLE_INVPCID;
1300 }
1301
1302 static inline bool cpu_has_virtual_nmis(void)
1303 {
1304         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
1305 }
1306
1307 static inline bool cpu_has_vmx_wbinvd_exit(void)
1308 {
1309         return vmcs_config.cpu_based_2nd_exec_ctrl &
1310                 SECONDARY_EXEC_WBINVD_EXITING;
1311 }
1312
1313 static inline bool cpu_has_vmx_shadow_vmcs(void)
1314 {
1315         u64 vmx_msr;
1316         rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1317         /* check if the cpu supports writing r/o exit information fields */
1318         if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1319                 return false;
1320
1321         return vmcs_config.cpu_based_2nd_exec_ctrl &
1322                 SECONDARY_EXEC_SHADOW_VMCS;
1323 }
1324
1325 static inline bool cpu_has_vmx_pml(void)
1326 {
1327         return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1328 }
1329
1330 static inline bool cpu_has_vmx_tsc_scaling(void)
1331 {
1332         return vmcs_config.cpu_based_2nd_exec_ctrl &
1333                 SECONDARY_EXEC_TSC_SCALING;
1334 }
1335
1336 static inline bool cpu_has_vmx_vmfunc(void)
1337 {
1338         return vmcs_config.cpu_based_2nd_exec_ctrl &
1339                 SECONDARY_EXEC_ENABLE_VMFUNC;
1340 }
1341
1342 static inline bool report_flexpriority(void)
1343 {
1344         return flexpriority_enabled;
1345 }
1346
1347 static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
1348 {
1349         return vmx_misc_cr3_count(to_vmx(vcpu)->nested.nested_vmx_misc_low);
1350 }
1351
1352 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1353 {
1354         return vmcs12->cpu_based_vm_exec_control & bit;
1355 }
1356
1357 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1358 {
1359         return (vmcs12->cpu_based_vm_exec_control &
1360                         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1361                 (vmcs12->secondary_vm_exec_control & bit);
1362 }
1363
1364 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1365 {
1366         return vmcs12->pin_based_vm_exec_control &
1367                 PIN_BASED_VMX_PREEMPTION_TIMER;
1368 }
1369
1370 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1371 {
1372         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1373 }
1374
1375 static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1376 {
1377         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
1378 }
1379
1380 static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
1381 {
1382         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
1383 }
1384
1385 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
1386 {
1387         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
1388 }
1389
1390 static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
1391 {
1392         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
1393 }
1394
1395 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
1396 {
1397         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
1398 }
1399
1400 static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
1401 {
1402         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
1403 }
1404
1405 static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
1406 {
1407         return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
1408 }
1409
1410 static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
1411 {
1412         return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
1413 }
1414
1415 static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
1416 {
1417         return nested_cpu_has_vmfunc(vmcs12) &&
1418                 (vmcs12->vm_function_control &
1419                  VMX_VMFUNC_EPTP_SWITCHING);
1420 }
1421
1422 static inline bool is_nmi(u32 intr_info)
1423 {
1424         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1425                 == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
1426 }
1427
1428 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1429                               u32 exit_intr_info,
1430                               unsigned long exit_qualification);
1431 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1432                         struct vmcs12 *vmcs12,
1433                         u32 reason, unsigned long qualification);
1434
1435 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
1436 {
1437         int i;
1438
1439         for (i = 0; i < vmx->nmsrs; ++i)
1440                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
1441                         return i;
1442         return -1;
1443 }
1444
1445 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1446 {
1447         struct {
1448                 u64 vpid : 16;
1449                 u64 rsvd : 48;
1450                 u64 gva;
1451         } operand = { vpid, 0, gva };
1452
1453         asm volatile (__ex(ASM_VMX_INVVPID)
1454                       /* CF==1 or ZF==1 --> rc = -1 */
1455                       "; ja 1f ; ud2 ; 1:"
1456                       : : "a"(&operand), "c"(ext) : "cc", "memory");
1457 }
1458
1459 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1460 {
1461         struct {
1462                 u64 eptp, gpa;
1463         } operand = {eptp, gpa};
1464
1465         asm volatile (__ex(ASM_VMX_INVEPT)
1466                         /* CF==1 or ZF==1 --> rc = -1 */
1467                         "; ja 1f ; ud2 ; 1:\n"
1468                         : : "a" (&operand), "c" (ext) : "cc", "memory");
1469 }
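
/*
 * Editorial note: both INVVPID and INVEPT take their descriptor as a 128-bit
 * memory operand, mirrored by the structs above (a VPID in bits 15:0 plus a
 * linear address for INVVPID, an EPT pointer plus a second quadword for
 * INVEPT), while the invalidation extent is passed in a register.
 */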
1470
1471 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
1472 {
1473         int i;
1474
1475         i = __find_msr_index(vmx, msr);
1476         if (i >= 0)
1477                 return &vmx->guest_msrs[i];
1478         return NULL;
1479 }
1480
1481 static void vmcs_clear(struct vmcs *vmcs)
1482 {
1483         u64 phys_addr = __pa(vmcs);
1484         u8 error;
1485
1486         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
1487                       : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1488                       : "cc", "memory");
1489         if (error)
1490                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
1491                        vmcs, phys_addr);
1492 }
1493
1494 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1495 {
1496         vmcs_clear(loaded_vmcs->vmcs);
1497         if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
1498                 vmcs_clear(loaded_vmcs->shadow_vmcs);
1499         loaded_vmcs->cpu = -1;
1500         loaded_vmcs->launched = 0;
1501 }
1502
1503 static void vmcs_load(struct vmcs *vmcs)
1504 {
1505         u64 phys_addr = __pa(vmcs);
1506         u8 error;
1507
1508         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
1509                         : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1510                         : "cc", "memory");
1511         if (error)
1512                 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
1513                        vmcs, phys_addr);
1514 }
1515
1516 #ifdef CONFIG_KEXEC_CORE
1517 /*
1518  * This bitmap indicates, for each CPU, whether the crash-time
1519  * vmclear operation is enabled on that CPU.  All CPUs are
1520  * disabled by default.
1521  */
1522 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
1523
1524 static inline void crash_enable_local_vmclear(int cpu)
1525 {
1526         cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
1527 }
1528
1529 static inline void crash_disable_local_vmclear(int cpu)
1530 {
1531         cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
1532 }
1533
1534 static inline int crash_local_vmclear_enabled(int cpu)
1535 {
1536         return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
1537 }
1538
1539 static void crash_vmclear_local_loaded_vmcss(void)
1540 {
1541         int cpu = raw_smp_processor_id();
1542         struct loaded_vmcs *v;
1543
1544         if (!crash_local_vmclear_enabled(cpu))
1545                 return;
1546
1547         list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
1548                             loaded_vmcss_on_cpu_link)
1549                 vmcs_clear(v->vmcs);
1550 }
1551 #else
1552 static inline void crash_enable_local_vmclear(int cpu) { }
1553 static inline void crash_disable_local_vmclear(int cpu) { }
1554 #endif /* CONFIG_KEXEC_CORE */
1555
1556 static void __loaded_vmcs_clear(void *arg)
1557 {
1558         struct loaded_vmcs *loaded_vmcs = arg;
1559         int cpu = raw_smp_processor_id();
1560
1561         if (loaded_vmcs->cpu != cpu)
1562                 return; /* vcpu migration can race with cpu offline */
1563         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
1564                 per_cpu(current_vmcs, cpu) = NULL;
1565         crash_disable_local_vmclear(cpu);
1566         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
1567
1568         /*
1569          * Ensure the removal from loaded_vmcss_on_cpu_link above is
1570          * ordered before loaded_vmcs_init() sets loaded_vmcs->cpu to -1.
1571          * Otherwise, another CPU could observe cpu == -1 first and add
1572          * the VMCS back onto the per-cpu list before it is deleted.
1573          */
1574         smp_wmb();
1575
1576         loaded_vmcs_init(loaded_vmcs);
1577         crash_enable_local_vmclear(cpu);
1578 }
1579
1580 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1581 {
1582         int cpu = loaded_vmcs->cpu;
1583
1584         if (cpu != -1)
1585                 smp_call_function_single(cpu,
1586                          __loaded_vmcs_clear, loaded_vmcs, 1);
1587 }
1588
1589 static inline void vpid_sync_vcpu_single(int vpid)
1590 {
1591         if (vpid == 0)
1592                 return;
1593
1594         if (cpu_has_vmx_invvpid_single())
1595                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
1596 }
1597
1598 static inline void vpid_sync_vcpu_global(void)
1599 {
1600         if (cpu_has_vmx_invvpid_global())
1601                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
1602 }
1603
1604 static inline void vpid_sync_context(int vpid)
1605 {
1606         if (cpu_has_vmx_invvpid_single())
1607                 vpid_sync_vcpu_single(vpid);
1608         else
1609                 vpid_sync_vcpu_global();
1610 }
1611
1612 static inline void ept_sync_global(void)
1613 {
1614         __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
1615 }
1616
1617 static inline void ept_sync_context(u64 eptp)
1618 {
1619         if (cpu_has_vmx_invept_context())
1620                 __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
1621         else
1622                 ept_sync_global();
1623 }
1624
1625 static __always_inline void vmcs_check16(unsigned long field)
1626 {
1627         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1628                          "16-bit accessor invalid for 64-bit field");
1629         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1630                          "16-bit accessor invalid for 64-bit high field");
1631         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1632                          "16-bit accessor invalid for 32-bit high field");
1633         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1634                          "16-bit accessor invalid for natural width field");
1635 }
1636
1637 static __always_inline void vmcs_check32(unsigned long field)
1638 {
1639         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1640                          "32-bit accessor invalid for 16-bit field");
1641         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1642                          "32-bit accessor invalid for natural width field");
1643 }
1644
1645 static __always_inline void vmcs_check64(unsigned long field)
1646 {
1647         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1648                          "64-bit accessor invalid for 16-bit field");
1649         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1650                          "64-bit accessor invalid for 64-bit high field");
1651         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1652                          "64-bit accessor invalid for 32-bit field");
1653         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1654                          "64-bit accessor invalid for natural width field");
1655 }
1656
1657 static __always_inline void vmcs_checkl(unsigned long field)
1658 {
1659         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1660                          "Natural width accessor invalid for 16-bit field");
1661         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1662                          "Natural width accessor invalid for 64-bit field");
1663         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1664                          "Natural width accessor invalid for 64-bit high field");
1665         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1666                          "Natural width accessor invalid for 32-bit field");
1667 }
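
/*
 * For reference, the checks above follow the VMCS field encoding (Intel
 * SDM Vol. 3, Appendix B): bits 14:13 of the encoding give the width
 * (0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural width) and bit 0 is
 * the access type that selects the high 32 bits of a 64-bit field.
 * For example:
 *
 *   GUEST_ES_SELECTOR = 0x0800: (0x0800 & 0x6000) == 0x0000 -> 16-bit
 *   IO_BITMAP_A       = 0x2000: (0x2000 & 0x6001) == 0x2000 -> 64-bit
 *   IO_BITMAP_A_HIGH  = 0x2001: (0x2001 & 0x6001) == 0x2001 -> 64-bit high
 *   GUEST_ES_LIMIT    = 0x4800: (0x4800 & 0x6000) == 0x4000 -> 32-bit
 *   GUEST_CR0         = 0x6800: (0x6800 & 0x6000) == 0x6000 -> natural width
 */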
1668
1669 static __always_inline unsigned long __vmcs_readl(unsigned long field)
1670 {
1671         unsigned long value;
1672
1673         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
1674                       : "=a"(value) : "d"(field) : "cc");
1675         return value;
1676 }
1677
1678 static __always_inline u16 vmcs_read16(unsigned long field)
1679 {
1680         vmcs_check16(field);
1681         return __vmcs_readl(field);
1682 }
1683
1684 static __always_inline u32 vmcs_read32(unsigned long field)
1685 {
1686         vmcs_check32(field);
1687         return __vmcs_readl(field);
1688 }
1689
1690 static __always_inline u64 vmcs_read64(unsigned long field)
1691 {
1692         vmcs_check64(field);
1693 #ifdef CONFIG_X86_64
1694         return __vmcs_readl(field);
1695 #else
1696         return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
1697 #endif
1698 }
1699
1700 static __always_inline unsigned long vmcs_readl(unsigned long field)
1701 {
1702         vmcs_checkl(field);
1703         return __vmcs_readl(field);
1704 }
1705
1706 static noinline void vmwrite_error(unsigned long field, unsigned long value)
1707 {
1708         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
1709                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
1710         dump_stack();
1711 }
1712
1713 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
1714 {
1715         u8 error;
1716
1717         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
1718                        : "=q"(error) : "a"(value), "d"(field) : "cc");
1719         if (unlikely(error))
1720                 vmwrite_error(field, value);
1721 }
1722
1723 static __always_inline void vmcs_write16(unsigned long field, u16 value)
1724 {
1725         vmcs_check16(field);
1726         __vmcs_writel(field, value);
1727 }
1728
1729 static __always_inline void vmcs_write32(unsigned long field, u32 value)
1730 {
1731         vmcs_check32(field);
1732         __vmcs_writel(field, value);
1733 }
1734
1735 static __always_inline void vmcs_write64(unsigned long field, u64 value)
1736 {
1737         vmcs_check64(field);
1738         __vmcs_writel(field, value);
1739 #ifndef CONFIG_X86_64
1740         asm volatile ("");
1741         __vmcs_writel(field+1, value >> 32);
1742 #endif
1743 }
1744
1745 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
1746 {
1747         vmcs_checkl(field);
1748         __vmcs_writel(field, value);
1749 }
1750
1751 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
1752 {
1753         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1754                          "vmcs_clear_bits does not support 64-bit fields");
1755         __vmcs_writel(field, __vmcs_readl(field) & ~mask);
1756 }
1757
1758 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
1759 {
1760         BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1761                          "vmcs_set_bits does not support 64-bit fields");
1762         __vmcs_writel(field, __vmcs_readl(field) | mask);
1763 }
1764
1765 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
1766 {
1767         vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
1768 }
1769
1770 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
1771 {
1772         vmcs_write32(VM_ENTRY_CONTROLS, val);
1773         vmx->vm_entry_controls_shadow = val;
1774 }
1775
1776 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
1777 {
1778         if (vmx->vm_entry_controls_shadow != val)
1779                 vm_entry_controls_init(vmx, val);
1780 }
1781
1782 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
1783 {
1784         return vmx->vm_entry_controls_shadow;
1785 }
1786
1787
1788 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1789 {
1790         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
1791 }
1792
1793 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1794 {
1795         vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
1796 }
1797
1798 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
1799 {
1800         vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
1801 }
1802
1803 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
1804 {
1805         vmcs_write32(VM_EXIT_CONTROLS, val);
1806         vmx->vm_exit_controls_shadow = val;
1807 }
1808
1809 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
1810 {
1811         if (vmx->vm_exit_controls_shadow != val)
1812                 vm_exit_controls_init(vmx, val);
1813 }
1814
1815 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
1816 {
1817         return vmx->vm_exit_controls_shadow;
1818 }
1819
1820
1821 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1822 {
1823         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
1824 }
1825
1826 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1827 {
1828         vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
1829 }
1830
1831 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
1832 {
1833         vmx->segment_cache.bitmask = 0;
1834 }
1835
1836 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
1837                                        unsigned field)
1838 {
1839         bool ret;
1840         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
1841
1842         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
1843                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
1844                 vmx->segment_cache.bitmask = 0;
1845         }
1846         ret = vmx->segment_cache.bitmask & mask;
1847         vmx->segment_cache.bitmask |= mask;
1848         return ret;
1849 }
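
/*
 * The cache keeps one valid bit per (segment, field) pair at bit position
 * (seg * SEG_FIELD_NR + field).  For example, taking SEG_FIELD_SEL..SEG_FIELD_AR
 * as 0..3 (so SEG_FIELD_NR == 4) and VCPU_SREG_CS as 1, the cached CS limit
 * is tracked by bit 1 * 4 + 2 = 6, i.e. mask 0x40.
 */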
1850
1851 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
1852 {
1853         u16 *p = &vmx->segment_cache.seg[seg].selector;
1854
1855         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
1856                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
1857         return *p;
1858 }
1859
1860 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
1861 {
1862         ulong *p = &vmx->segment_cache.seg[seg].base;
1863
1864         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
1865                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
1866         return *p;
1867 }
1868
1869 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
1870 {
1871         u32 *p = &vmx->segment_cache.seg[seg].limit;
1872
1873         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
1874                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
1875         return *p;
1876 }
1877
1878 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
1879 {
1880         u32 *p = &vmx->segment_cache.seg[seg].ar;
1881
1882         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
1883                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
1884         return *p;
1885 }
1886
1887 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1888 {
1889         u32 eb;
1890
1891         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1892              (1u << DB_VECTOR) | (1u << AC_VECTOR);
1893         if ((vcpu->guest_debug &
1894              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
1895             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
1896                 eb |= 1u << BP_VECTOR;
1897         if (to_vmx(vcpu)->rmode.vm86_active)
1898                 eb = ~0;
1899         if (enable_ept)
1900                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
1901
1902         /* When we are running a nested L2 guest and L1 specified for it a
1903          * certain exception bitmap, we must trap the same exceptions and pass
1904          * them to L1. When running L2, we will only handle the exceptions
1905          * specified above if L1 did not want them.
1906          */
1907         if (is_guest_mode(vcpu))
1908                 eb |= get_vmcs12(vcpu)->exception_bitmap;
1909
1910         vmcs_write32(EXCEPTION_BITMAP, eb);
1911 }
1912
1913 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1914                 unsigned long entry, unsigned long exit)
1915 {
1916         vm_entry_controls_clearbit(vmx, entry);
1917         vm_exit_controls_clearbit(vmx, exit);
1918 }
1919
1920 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1921 {
1922         unsigned i;
1923         struct msr_autoload *m = &vmx->msr_autoload;
1924
1925         switch (msr) {
1926         case MSR_EFER:
1927                 if (cpu_has_load_ia32_efer) {
1928                         clear_atomic_switch_msr_special(vmx,
1929                                         VM_ENTRY_LOAD_IA32_EFER,
1930                                         VM_EXIT_LOAD_IA32_EFER);
1931                         return;
1932                 }
1933                 break;
1934         case MSR_CORE_PERF_GLOBAL_CTRL:
1935                 if (cpu_has_load_perf_global_ctrl) {
1936                         clear_atomic_switch_msr_special(vmx,
1937                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1938                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1939                         return;
1940                 }
1941                 break;
1942         }
1943
1944         for (i = 0; i < m->nr; ++i)
1945                 if (m->guest[i].index == msr)
1946                         break;
1947
1948         if (i == m->nr)
1949                 return;
1950         --m->nr;
1951         m->guest[i] = m->guest[m->nr];
1952         m->host[i] = m->host[m->nr];
1953         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1954         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1955 }
1956
1957 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1958                 unsigned long entry, unsigned long exit,
1959                 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
1960                 u64 guest_val, u64 host_val)
1961 {
1962         vmcs_write64(guest_val_vmcs, guest_val);
1963         vmcs_write64(host_val_vmcs, host_val);
1964         vm_entry_controls_setbit(vmx, entry);
1965         vm_exit_controls_setbit(vmx, exit);
1966 }
1967
1968 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1969                                   u64 guest_val, u64 host_val)
1970 {
1971         unsigned i;
1972         struct msr_autoload *m = &vmx->msr_autoload;
1973
1974         switch (msr) {
1975         case MSR_EFER:
1976                 if (cpu_has_load_ia32_efer) {
1977                         add_atomic_switch_msr_special(vmx,
1978                                         VM_ENTRY_LOAD_IA32_EFER,
1979                                         VM_EXIT_LOAD_IA32_EFER,
1980                                         GUEST_IA32_EFER,
1981                                         HOST_IA32_EFER,
1982                                         guest_val, host_val);
1983                         return;
1984                 }
1985                 break;
1986         case MSR_CORE_PERF_GLOBAL_CTRL:
1987                 if (cpu_has_load_perf_global_ctrl) {
1988                         add_atomic_switch_msr_special(vmx,
1989                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1990                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1991                                         GUEST_IA32_PERF_GLOBAL_CTRL,
1992                                         HOST_IA32_PERF_GLOBAL_CTRL,
1993                                         guest_val, host_val);
1994                         return;
1995                 }
1996                 break;
1997         case MSR_IA32_PEBS_ENABLE:
1998                 /* PEBS needs a quiescent period after being disabled (to write
1999                  * a record).  Disabling PEBS through VMX MSR swapping doesn't
2000                  * provide that period, so a CPU could write host's record into
2001                  * guest's memory.
2002                  */
2003                 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
2004         }
2005
2006         for (i = 0; i < m->nr; ++i)
2007                 if (m->guest[i].index == msr)
2008                         break;
2009
2010         if (i == NR_AUTOLOAD_MSRS) {
2011                 printk_once(KERN_WARNING "Not enough msr switch entries. "
2012                                 "Can't add msr %x\n", msr);
2013                 return;
2014         } else if (i == m->nr) {
2015                 ++m->nr;
2016                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
2017                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
2018         }
2019
2020         m->guest[i].index = msr;
2021         m->guest[i].value = guest_val;
2022         m->host[i].index = msr;
2023         m->host[i].value = host_val;
2024 }
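
/*
 * The two helpers above maintain the VM-entry and VM-exit MSR-load lists:
 * the CPU loads the m->guest[] values on every VM entry and the m->host[]
 * values on every VM exit, so the selected MSR is swapped atomically with
 * the guest transition.  See update_transition_efer() below for the
 * MSR_EFER case.
 */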
2025
2026 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
2027 {
2028         u64 guest_efer = vmx->vcpu.arch.efer;
2029         u64 ignore_bits = 0;
2030
2031         if (!enable_ept) {
2032                 /*
2033                  * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
2034                  * host CPUID is more efficient than testing guest CPUID
2035                  * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
2036                  */
2037                 if (boot_cpu_has(X86_FEATURE_SMEP))
2038                         guest_efer |= EFER_NX;
2039                 else if (!(guest_efer & EFER_NX))
2040                         ignore_bits |= EFER_NX;
2041         }
2042
2043         /*
2044          * LMA and LME handled by hardware; SCE meaningless outside long mode.
2045          */
2046         ignore_bits |= EFER_SCE;
2047 #ifdef CONFIG_X86_64
2048         ignore_bits |= EFER_LMA | EFER_LME;
2049         /* SCE is meaningful only in long mode on Intel */
2050         if (guest_efer & EFER_LMA)
2051                 ignore_bits &= ~(u64)EFER_SCE;
2052 #endif
2053
2054         clear_atomic_switch_msr(vmx, MSR_EFER);
2055
2056         /*
2057          * On EPT, we can't emulate NX, so we must switch EFER atomically.
2058          * On CPUs that support "load IA32_EFER", always switch EFER
2059          * atomically, since it's faster than switching it manually.
2060          */
2061         if (cpu_has_load_ia32_efer ||
2062             (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
2063                 if (!(guest_efer & EFER_LMA))
2064                         guest_efer &= ~EFER_LME;
2065                 if (guest_efer != host_efer)
2066                         add_atomic_switch_msr(vmx, MSR_EFER,
2067                                               guest_efer, host_efer);
2068                 return false;
2069         } else {
2070                 guest_efer &= ~ignore_bits;
2071                 guest_efer |= host_efer & ignore_bits;
2072
2073                 vmx->guest_msrs[efer_offset].data = guest_efer;
2074                 vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
2075
2076                 return true;
2077         }
2078 }
2079
2080 #ifdef CONFIG_X86_32
2081 /*
2082  * On 32-bit kernels, VM exits still load the FS and GS bases from the
2083  * VMCS rather than the segment table.  KVM uses this helper to figure
2084  * out the current bases to poke them into the VMCS before entry.
2085  */
2086 static unsigned long segment_base(u16 selector)
2087 {
2088         struct desc_struct *table;
2089         unsigned long v;
2090
2091         if (!(selector & ~SEGMENT_RPL_MASK))
2092                 return 0;
2093
2094         table = get_current_gdt_ro();
2095
2096         if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2097                 u16 ldt_selector = kvm_read_ldt();
2098
2099                 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
2100                         return 0;
2101
2102                 table = (struct desc_struct *)segment_base(ldt_selector);
2103         }
2104         v = get_desc_base(&table[selector >> 3]);
2105         return v;
2106 }
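
/*
 * Selector decoding used above, for example: selector 0x2b has index
 * 0x2b >> 3 = 5, the TI bit (SEGMENT_TI_MASK) clear, so it indexes the
 * GDT, and RPL = 0x2b & 3 = 3.  A selector with the TI bit set indexes
 * the current LDT instead, which is why segment_base() recurses on the
 * LDT selector to find the LDT's own base first.
 */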
2107 #endif
2108
2109 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
2110 {
2111         struct vcpu_vmx *vmx = to_vmx(vcpu);
2112         int i;
2113
2114         if (vmx->host_state.loaded)
2115                 return;
2116
2117         vmx->host_state.loaded = 1;
2118         /*
2119          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
2120          * allow segment selectors with cpl > 0 or ti == 1.
2121          */
2122         vmx->host_state.ldt_sel = kvm_read_ldt();
2123         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
2124         savesegment(fs, vmx->host_state.fs_sel);
2125         if (!(vmx->host_state.fs_sel & 7)) {
2126                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
2127                 vmx->host_state.fs_reload_needed = 0;
2128         } else {
2129                 vmcs_write16(HOST_FS_SELECTOR, 0);
2130                 vmx->host_state.fs_reload_needed = 1;
2131         }
2132         savesegment(gs, vmx->host_state.gs_sel);
2133         if (!(vmx->host_state.gs_sel & 7))
2134                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
2135         else {
2136                 vmcs_write16(HOST_GS_SELECTOR, 0);
2137                 vmx->host_state.gs_ldt_reload_needed = 1;
2138         }
2139
2140 #ifdef CONFIG_X86_64
2141         savesegment(ds, vmx->host_state.ds_sel);
2142         savesegment(es, vmx->host_state.es_sel);
2143 #endif
2144
2145 #ifdef CONFIG_X86_64
2146         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
2147         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
2148 #else
2149         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
2150         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
2151 #endif
2152
2153 #ifdef CONFIG_X86_64
2154         rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2155         if (is_long_mode(&vmx->vcpu))
2156                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2157 #endif
2158         if (boot_cpu_has(X86_FEATURE_MPX))
2159                 rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2160         for (i = 0; i < vmx->save_nmsrs; ++i)
2161                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2162                                    vmx->guest_msrs[i].data,
2163                                    vmx->guest_msrs[i].mask);
2164 }
2165
2166 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
2167 {
2168         if (!vmx->host_state.loaded)
2169                 return;
2170
2171         ++vmx->vcpu.stat.host_state_reload;
2172         vmx->host_state.loaded = 0;
2173 #ifdef CONFIG_X86_64
2174         if (is_long_mode(&vmx->vcpu))
2175                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2176 #endif
2177         if (vmx->host_state.gs_ldt_reload_needed) {
2178                 kvm_load_ldt(vmx->host_state.ldt_sel);
2179 #ifdef CONFIG_X86_64
2180                 load_gs_index(vmx->host_state.gs_sel);
2181 #else
2182                 loadsegment(gs, vmx->host_state.gs_sel);
2183 #endif
2184         }
2185         if (vmx->host_state.fs_reload_needed)
2186                 loadsegment(fs, vmx->host_state.fs_sel);
2187 #ifdef CONFIG_X86_64
2188         if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
2189                 loadsegment(ds, vmx->host_state.ds_sel);
2190                 loadsegment(es, vmx->host_state.es_sel);
2191         }
2192 #endif
2193         invalidate_tss_limit();
2194 #ifdef CONFIG_X86_64
2195         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2196 #endif
2197         if (vmx->host_state.msr_host_bndcfgs)
2198                 wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2199         load_fixmap_gdt(raw_smp_processor_id());
2200 }
2201
2202 static void vmx_load_host_state(struct vcpu_vmx *vmx)
2203 {
2204         preempt_disable();
2205         __vmx_load_host_state(vmx);
2206         preempt_enable();
2207 }
2208
2209 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2210 {
2211         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2212         struct pi_desc old, new;
2213         unsigned int dest;
2214
2215         /*
2216          * In case of hot-plug or hot-unplug, we may have to undo
2217          * vmx_vcpu_pi_put even if there is no assigned device.  And we
2218          * always keep PI.NDST up to date for simplicity: it makes the
2219          * code easier, and CPU migration is not a fast path.
2220          */
2221         if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
2222                 return;
2223
2224         /*
2225          * First handle the simple case where no cmpxchg is necessary; just
2226          * allow posting non-urgent interrupts.
2227          *
2228          * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
2229          * PI.NDST: pi_post_block will do it for us and the wakeup_handler
2230          * expects the VCPU to be on the blocked_vcpu_list that matches
2231          * PI.NDST.
2232          */
2233         if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
2234             vcpu->cpu == cpu) {
2235                 pi_clear_sn(pi_desc);
2236                 return;
2237         }
2238
2239         /* The full case.  */
2240         do {
2241                 old.control = new.control = pi_desc->control;
2242
2243                 dest = cpu_physical_id(cpu);
2244
2245                 if (x2apic_enabled())
2246                         new.ndst = dest;
2247                 else
2248                         new.ndst = (dest << 8) & 0xFF00;
2249
2250                 new.sn = 0;
2251         } while (cmpxchg64(&pi_desc->control, old.control,
2252                            new.control) != old.control);
2253 }
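
/*
 * The NDST update above mirrors the xAPIC/x2APIC destination formats: in
 * x2APIC mode NDST holds the full 32-bit APIC ID, while in xAPIC mode the
 * ID lives in bits 15:8.  For example, a destination APIC ID of 5 becomes
 * NDST = 5 in x2APIC mode and NDST = 0x0500 in xAPIC mode.
 */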
2254
2255 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
2256 {
2257         vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
2258         vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
2259 }
2260
2261 /*
2262  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
2263  * vcpu mutex is already taken.
2264  */
2265 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2266 {
2267         struct vcpu_vmx *vmx = to_vmx(vcpu);
2268         bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
2269
2270         if (!already_loaded) {
2271                 loaded_vmcs_clear(vmx->loaded_vmcs);
2272                 local_irq_disable();
2273                 crash_disable_local_vmclear(cpu);
2274
2275                 /*
2276                  * Read loaded_vmcs->cpu should be before fetching
2277                  * loaded_vmcs->loaded_vmcss_on_cpu_link.
2278                  * See the comments in __loaded_vmcs_clear().
2279                  */
2280                 smp_rmb();
2281
2282                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
2283                          &per_cpu(loaded_vmcss_on_cpu, cpu));
2284                 crash_enable_local_vmclear(cpu);
2285                 local_irq_enable();
2286         }
2287
2288         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
2289                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
2290                 vmcs_load(vmx->loaded_vmcs->vmcs);
2291         }
2292
2293         if (!already_loaded) {
2294                 void *gdt = get_current_gdt_ro();
2295                 unsigned long sysenter_esp;
2296
2297                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2298
2299                 /*
2300                  * Linux uses per-cpu TSS and GDT, so set these when switching
2301                  * processors.  See 22.2.4.
2302                  */
2303                 vmcs_writel(HOST_TR_BASE,
2304                             (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
2305                 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt);   /* 22.2.4 */
2306
2307                 /*
2308                  * VM exits change the host TR limit to 0x67 after a VM
2309                  * exit.  This is okay, since 0x67 covers everything except
2310                  * the IO bitmap and have have code to handle the IO bitmap
2311                  * the IO bitmap, and we have code to handle the IO bitmap
2312                  */
2313                 BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
2314
2315                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
2316                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
2317
2318                 vmx->loaded_vmcs->cpu = cpu;
2319         }
2320
2321         /* Setup TSC multiplier */
2322         if (kvm_has_tsc_control &&
2323             vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
2324                 decache_tsc_multiplier(vmx);
2325
2326         vmx_vcpu_pi_load(vcpu, cpu);
2327         vmx->host_pkru = read_pkru();
2328 }
2329
2330 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
2331 {
2332         struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2333
2334         if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
2335                 !irq_remapping_cap(IRQ_POSTING_CAP)  ||
2336                 !kvm_vcpu_apicv_active(vcpu))
2337                 return;
2338
2339         /* Set SN when the vCPU is preempted */
2340         if (vcpu->preempted)
2341                 pi_set_sn(pi_desc);
2342 }
2343
2344 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
2345 {
2346         vmx_vcpu_pi_put(vcpu);
2347
2348         __vmx_load_host_state(to_vmx(vcpu));
2349 }
2350
2351 static bool emulation_required(struct kvm_vcpu *vcpu)
2352 {
2353         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
2354 }
2355
2356 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
2357
2358 /*
2359  * Return the cr0 value that a nested guest would read. This is a combination
2360  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
2361  * its hypervisor (cr0_read_shadow).
2362  */
2363 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
2364 {
2365         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
2366                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
2367 }
2368 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
2369 {
2370         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
2371                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
2372 }
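
/*
 * In both helpers, a bit set in the guest/host mask is owned by L1: reads
 * by L2 return the corresponding bit of the read shadow.  A clear bit is
 * owned by L2 and returns the real guest value.  For example, if
 * cr0_guest_host_mask contains only X86_CR0_TS, then CR0.TS is read from
 * cr0_read_shadow and every other CR0 bit from guest_cr0.
 */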
2373
2374 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
2375 {
2376         unsigned long rflags, save_rflags;
2377
2378         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
2379                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2380                 rflags = vmcs_readl(GUEST_RFLAGS);
2381                 if (to_vmx(vcpu)->rmode.vm86_active) {
2382                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
2383                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
2384                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
2385                 }
2386                 to_vmx(vcpu)->rflags = rflags;
2387         }
2388         return to_vmx(vcpu)->rflags;
2389 }
2390
2391 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2392 {
2393         unsigned long old_rflags = vmx_get_rflags(vcpu);
2394
2395         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2396         to_vmx(vcpu)->rflags = rflags;
2397         if (to_vmx(vcpu)->rmode.vm86_active) {
2398                 to_vmx(vcpu)->rmode.save_rflags = rflags;
2399                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
2400         }
2401         vmcs_writel(GUEST_RFLAGS, rflags);
2402
2403         if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
2404                 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
2405 }
2406
2407 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
2408 {
2409         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2410         int ret = 0;
2411
2412         if (interruptibility & GUEST_INTR_STATE_STI)
2413                 ret |= KVM_X86_SHADOW_INT_STI;
2414         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
2415                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
2416
2417         return ret;
2418 }
2419
2420 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
2421 {
2422         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2423         u32 interruptibility = interruptibility_old;
2424
2425         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
2426
2427         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
2428                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
2429         else if (mask & KVM_X86_SHADOW_INT_STI)
2430                 interruptibility |= GUEST_INTR_STATE_STI;
2431
2432         if ((interruptibility != interruptibility_old))
2433                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
2434 }
2435
2436 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
2437 {
2438         unsigned long rip;
2439
2440         rip = kvm_rip_read(vcpu);
2441         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
2442         kvm_rip_write(vcpu, rip);
2443
2444         /* skipping an emulated instruction also counts */
2445         vmx_set_interrupt_shadow(vcpu, 0);
2446 }
2447
2448 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
2449                                                unsigned long exit_qual)
2450 {
2451         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2452         unsigned int nr = vcpu->arch.exception.nr;
2453         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2454
2455         if (vcpu->arch.exception.has_error_code) {
2456                 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
2457                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2458         }
2459
2460         if (kvm_exception_is_soft(nr))
2461                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2462         else
2463                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2464
2465         if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
2466             vmx_get_nmi_mask(vcpu))
2467                 intr_info |= INTR_INFO_UNBLOCK_NMI;
2468
2469         nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
2470 }
2471
2472 /*
2473  * KVM wants to deliver to the guest the page faults it intercepted. This
2474  * function checks whether, for a nested guest, they should be injected into L1 or L2.
2475  */
2476 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
2477 {
2478         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2479         unsigned int nr = vcpu->arch.exception.nr;
2480
2481         if (nr == PF_VECTOR) {
2482                 if (vcpu->arch.exception.nested_apf) {
2483                         *exit_qual = vcpu->arch.apf.nested_apf_token;
2484                         return 1;
2485                 }
2486                 /*
2487                  * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
2488                  * The fix is to add the ancillary datum (CR2 or DR6) to structs
2489                  * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
2490                  * can be written only when inject_pending_event runs.  This should be
2491                  * conditional on a new capability---if the capability is disabled,
2492                  * kvm_multiple_exception would write the ancillary information to
2493                  * CR2 or DR6, for backwards ABI-compatibility.
2494                  */
2495                 if (nested_vmx_is_page_fault_vmexit(vmcs12,
2496                                                     vcpu->arch.exception.error_code)) {
2497                         *exit_qual = vcpu->arch.cr2;
2498                         return 1;
2499                 }
2500         } else {
2501                 if (vmcs12->exception_bitmap & (1u << nr)) {
2502                         if (nr == DB_VECTOR)
2503                                 *exit_qual = vcpu->arch.dr6;
2504                         else
2505                                 *exit_qual = 0;
2506                         return 1;
2507                 }
2508         }
2509
2510         return 0;
2511 }
2512
2513 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
2514 {
2515         struct vcpu_vmx *vmx = to_vmx(vcpu);
2516         unsigned nr = vcpu->arch.exception.nr;
2517         bool has_error_code = vcpu->arch.exception.has_error_code;
2518         u32 error_code = vcpu->arch.exception.error_code;
2519         u32 intr_info = nr | INTR_INFO_VALID_MASK;
2520
2521         if (has_error_code) {
2522                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
2523                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2524         }
2525
2526         if (vmx->rmode.vm86_active) {
2527                 int inc_eip = 0;
2528                 if (kvm_exception_is_soft(nr))
2529                         inc_eip = vcpu->arch.event_exit_inst_len;
2530                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
2531                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2532                 return;
2533         }
2534
2535         if (kvm_exception_is_soft(nr)) {
2536                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2537                              vmx->vcpu.arch.event_exit_inst_len);
2538                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2539         } else
2540                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2541
2542         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
2543 }
2544
2545 static bool vmx_rdtscp_supported(void)
2546 {
2547         return cpu_has_vmx_rdtscp();
2548 }
2549
2550 static bool vmx_invpcid_supported(void)
2551 {
2552         return cpu_has_vmx_invpcid() && enable_ept;
2553 }
2554
2555 /*
2556  * Swap MSR entry in host/guest MSR entry array.
2557  */
2558 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
2559 {
2560         struct shared_msr_entry tmp;
2561
2562         tmp = vmx->guest_msrs[to];
2563         vmx->guest_msrs[to] = vmx->guest_msrs[from];
2564         vmx->guest_msrs[from] = tmp;
2565 }
2566
2567 static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
2568 {
2569         unsigned long *msr_bitmap;
2570
2571         if (is_guest_mode(vcpu))
2572                 msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
2573         else if (cpu_has_secondary_exec_ctrls() &&
2574                  (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
2575                   SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
2576                 if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) {
2577                         if (is_long_mode(vcpu))
2578                                 msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv;
2579                         else
2580                                 msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv;
2581                 } else {
2582                         if (is_long_mode(vcpu))
2583                                 msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
2584                         else
2585                                 msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
2586                 }
2587         } else {
2588                 if (is_long_mode(vcpu))
2589                         msr_bitmap = vmx_msr_bitmap_longmode;
2590                 else
2591                         msr_bitmap = vmx_msr_bitmap_legacy;
2592         }
2593
2594         vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
2595 }
2596
2597 /*
2598  * Set up the vmcs to automatically save and restore system
2599  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
2600  * mode, as fiddling with msrs is very expensive.
2601  */
2602 static void setup_msrs(struct vcpu_vmx *vmx)
2603 {
2604         int save_nmsrs, index;
2605
2606         save_nmsrs = 0;
2607 #ifdef CONFIG_X86_64
2608         if (is_long_mode(&vmx->vcpu)) {
2609                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
2610                 if (index >= 0)
2611                         move_msr_up(vmx, index, save_nmsrs++);
2612                 index = __find_msr_index(vmx, MSR_LSTAR);
2613                 if (index >= 0)
2614                         move_msr_up(vmx, index, save_nmsrs++);
2615                 index = __find_msr_index(vmx, MSR_CSTAR);
2616                 if (index >= 0)
2617                         move_msr_up(vmx, index, save_nmsrs++);
2618                 index = __find_msr_index(vmx, MSR_TSC_AUX);
2619                 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
2620                         move_msr_up(vmx, index, save_nmsrs++);
2621                 /*
2622                  * MSR_STAR is only needed on long mode guests, and only
2623                  * if efer.sce is enabled.
2624                  */
2625                 index = __find_msr_index(vmx, MSR_STAR);
2626                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
2627                         move_msr_up(vmx, index, save_nmsrs++);
2628         }
2629 #endif
2630         index = __find_msr_index(vmx, MSR_EFER);
2631         if (index >= 0 && update_transition_efer(vmx, index))
2632                 move_msr_up(vmx, index, save_nmsrs++);
2633
2634         vmx->save_nmsrs = save_nmsrs;
2635
2636         if (cpu_has_vmx_msr_bitmap())
2637                 vmx_set_msr_bitmap(&vmx->vcpu);
2638 }
2639
2640 /*
2641  * reads and returns guest's timestamp counter "register"
2642  * guest_tsc = ((host_tsc * tsc_multiplier) >> 48) + tsc_offset
2643  * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
2644  */
2645 static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
2646 {
2647         u64 host_tsc, tsc_offset;
2648
2649         host_tsc = rdtsc();
2650         tsc_offset = vmcs_read64(TSC_OFFSET);
2651         return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
2652 }
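
/*
 * The TSC multiplier used by kvm_scale_tsc() is a fixed-point value with
 * 48 fractional bits on VMX, so a multiplier of (1ULL << 48) is a ratio of
 * 1.0 and (3ULL << 47) is 1.5; with the latter, a host TSC of 1000 reads
 * back as a guest TSC of 1500 + TSC_OFFSET.
 */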
2653
2654 /*
2655  * writes 'offset' into guest's timestamp counter offset register
2656  */
2657 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2658 {
2659         if (is_guest_mode(vcpu)) {
2660                 /*
2661                  * We're here if L1 chose not to trap WRMSR to TSC. According
2662                  * to the spec, this should set L1's TSC; the offset that L1
2663                  * set for L2 remains unchanged, and still needs to be added
2664                  * to the newly set TSC to get L2's TSC.
2665                  */
2666                 struct vmcs12 *vmcs12;
2667                 /* recalculate vmcs02.TSC_OFFSET: */
2668                 vmcs12 = get_vmcs12(vcpu);
2669                 vmcs_write64(TSC_OFFSET, offset +
2670                         (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
2671                          vmcs12->tsc_offset : 0));
2672         } else {
2673                 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2674                                            vmcs_read64(TSC_OFFSET), offset);
2675                 vmcs_write64(TSC_OFFSET, offset);
2676         }
2677 }
2678
2679 /*
2680  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
2681  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
2682  * all guests if the "nested" module option is off, and can also be disabled
2683  * for a single guest by disabling its VMX cpuid bit.
2684  */
2685 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2686 {
2687         return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
2688 }
2689
2690 /*
2691  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
2692  * returned for the various VMX controls MSRs when nested VMX is enabled.
2693  * The same values should also be used to verify that vmcs12 control fields are
2694  * valid during nested entry from L1 to L2.
2695  * Each of these control msrs has a low and high 32-bit half: A low bit is on
2696  * if the corresponding bit in the (32-bit) control field *must* be on, and a
2697  * bit in the high half is on if the corresponding bit in the control field
2698  * may be on. See also vmx_control_verify().
2699  */
2700 static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2701 {
2702         /*
2703          * Note that as a general rule, the high half of the MSRs (bits in
2704          * the control fields which may be 1) should be initialized by the
2705          * intersection of the underlying hardware's MSR (i.e., features which
2706          * can be supported) and the list of features we want to expose -
2707          * because they are known to be properly supported in our code.
2708          * Also, usually, the low half of the MSRs (bits which must be 1) can
2709          * be set to 0, meaning that L1 may turn off any of these bits. The
2710          * reason is that if one of these bits is necessary, it will appear
2711          * in vmcs01, and prepare_vmcs02, when it bitwise-or's the control
2712          * fields of vmcs01 and vmcs12, will turn these bits on - and
2713          * nested_vmx_exit_reflected() will not pass related exits to L1.
2714          * These rules have exceptions below.
2715          */
2716
2717         /* pin-based controls */
2718         rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
2719                 vmx->nested.nested_vmx_pinbased_ctls_low,
2720                 vmx->nested.nested_vmx_pinbased_ctls_high);
2721         vmx->nested.nested_vmx_pinbased_ctls_low |=
2722                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2723         vmx->nested.nested_vmx_pinbased_ctls_high &=
2724                 PIN_BASED_EXT_INTR_MASK |
2725                 PIN_BASED_NMI_EXITING |
2726                 PIN_BASED_VIRTUAL_NMIS;
2727         vmx->nested.nested_vmx_pinbased_ctls_high |=
2728                 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2729                 PIN_BASED_VMX_PREEMPTION_TIMER;
2730         if (kvm_vcpu_apicv_active(&vmx->vcpu))
2731                 vmx->nested.nested_vmx_pinbased_ctls_high |=
2732                         PIN_BASED_POSTED_INTR;
2733
2734         /* exit controls */
2735         rdmsr(MSR_IA32_VMX_EXIT_CTLS,
2736                 vmx->nested.nested_vmx_exit_ctls_low,
2737                 vmx->nested.nested_vmx_exit_ctls_high);
2738         vmx->nested.nested_vmx_exit_ctls_low =
2739                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
2740
2741         vmx->nested.nested_vmx_exit_ctls_high &=
2742 #ifdef CONFIG_X86_64
2743                 VM_EXIT_HOST_ADDR_SPACE_SIZE |
2744 #endif
2745                 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
2746         vmx->nested.nested_vmx_exit_ctls_high |=
2747                 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
2748                 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
2749                 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
2750
2751         if (kvm_mpx_supported())
2752                 vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
2753
2754         /* We support free control of debug control saving. */
2755         vmx->nested.nested_vmx_exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
2756
2757         /* entry controls */
2758         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
2759                 vmx->nested.nested_vmx_entry_ctls_low,
2760                 vmx->nested.nested_vmx_entry_ctls_high);
2761         vmx->nested.nested_vmx_entry_ctls_low =
2762                 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
2763         vmx->nested.nested_vmx_entry_ctls_high &=
2764 #ifdef CONFIG_X86_64
2765                 VM_ENTRY_IA32E_MODE |
2766 #endif
2767                 VM_ENTRY_LOAD_IA32_PAT;
2768         vmx->nested.nested_vmx_entry_ctls_high |=
2769                 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
2770         if (kvm_mpx_supported())
2771                 vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
2772
2773         /* We support free control of debug control loading. */
2774         vmx->nested.nested_vmx_entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
2775
2776         /* cpu-based controls */
2777         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
2778                 vmx->nested.nested_vmx_procbased_ctls_low,
2779                 vmx->nested.nested_vmx_procbased_ctls_high);
2780         vmx->nested.nested_vmx_procbased_ctls_low =
2781                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2782         vmx->nested.nested_vmx_procbased_ctls_high &=
2783                 CPU_BASED_VIRTUAL_INTR_PENDING |
2784                 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
2785                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
2786                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
2787                 CPU_BASED_CR3_STORE_EXITING |
2788 #ifdef CONFIG_X86_64
2789                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
2790 #endif
2791                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
2792                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
2793                 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
2794                 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
2795                 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2796         /*
2797          * We can allow some features even when not supported by the
2798          * hardware. For example, L1 can specify an MSR bitmap - and we
2799          * can use it to avoid exits to L1 - even when L0 runs L2
2800          * without MSR bitmaps.
2801          */
2802         vmx->nested.nested_vmx_procbased_ctls_high |=
2803                 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2804                 CPU_BASED_USE_MSR_BITMAPS;
2805
2806         /* We support free control of CR3 access interception. */
2807         vmx->nested.nested_vmx_procbased_ctls_low &=
2808                 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
2809
2810         /*
2811          * secondary cpu-based controls.  Do not include those that
2812          * depend on CPUID bits, they are added later by vmx_cpuid_update.
2813          */
2814         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
2815                 vmx->nested.nested_vmx_secondary_ctls_low,
2816                 vmx->nested.nested_vmx_secondary_ctls_high);
2817         vmx->nested.nested_vmx_secondary_ctls_low = 0;
2818         vmx->nested.nested_vmx_secondary_ctls_high &=
2819                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2820                 SECONDARY_EXEC_DESC |
2821                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2822                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2823                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2824                 SECONDARY_EXEC_WBINVD_EXITING;
2825
2826         if (enable_ept) {
2827                 /* nested EPT: emulate EPT also to L1 */
2828                 vmx->nested.nested_vmx_secondary_ctls_high |=
2829                         SECONDARY_EXEC_ENABLE_EPT;
2830                 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2831                          VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
2832                 if (cpu_has_vmx_ept_execute_only())
2833                         vmx->nested.nested_vmx_ept_caps |=
2834                                 VMX_EPT_EXECUTE_ONLY_BIT;
2835                 vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
2836                 vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
2837                         VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
2838                         VMX_EPT_1GB_PAGE_BIT;
2839                 if (enable_ept_ad_bits) {
2840                         vmx->nested.nested_vmx_secondary_ctls_high |=
2841                                 SECONDARY_EXEC_ENABLE_PML;
2842                         vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
2843                 }
2844         }
2845
2846         if (cpu_has_vmx_vmfunc()) {
2847                 vmx->nested.nested_vmx_secondary_ctls_high |=
2848                         SECONDARY_EXEC_ENABLE_VMFUNC;
2849                 /*
2850                  * Advertise EPTP switching unconditionally
2851                  * since we emulate it
2852                  */
2853                 if (enable_ept)
2854                         vmx->nested.nested_vmx_vmfunc_controls =
2855                                 VMX_VMFUNC_EPTP_SWITCHING;
2856         }
2857
2858         /*
2859          * Old versions of KVM use the single-context version without
2860          * checking for support, so declare that it is supported even
2861          * though it is treated as global context.  The alternative would
2862          * be to fail the single-context invvpid, which is worse.
2863          */
2864         if (enable_vpid) {
2865                 vmx->nested.nested_vmx_secondary_ctls_high |=
2866                         SECONDARY_EXEC_ENABLE_VPID;
2867                 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
2868                         VMX_VPID_EXTENT_SUPPORTED_MASK;
2869         }
2870
2871         if (enable_unrestricted_guest)
2872                 vmx->nested.nested_vmx_secondary_ctls_high |=
2873                         SECONDARY_EXEC_UNRESTRICTED_GUEST;
2874
2875         /* miscellaneous data */
2876         rdmsr(MSR_IA32_VMX_MISC,
2877                 vmx->nested.nested_vmx_misc_low,
2878                 vmx->nested.nested_vmx_misc_high);
2879         vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
2880         vmx->nested.nested_vmx_misc_low |=
2881                 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
2882                 VMX_MISC_ACTIVITY_HLT;
2883         vmx->nested.nested_vmx_misc_high = 0;
2884
2885         /*
2886          * This MSR reports some information about VMX support. We
2887          * should return information about the VMX we emulate for the
2888          * guest, and the VMCS structure we give it - not about the
2889          * VMX support of the underlying hardware.
2890          */
2891         vmx->nested.nested_vmx_basic =
2892                 VMCS12_REVISION |
2893                 VMX_BASIC_TRUE_CTLS |
2894                 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
2895                 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
2896
2897         if (cpu_has_vmx_basic_inout())
2898                 vmx->nested.nested_vmx_basic |= VMX_BASIC_INOUT;
2899
2900         /*
2901          * These MSRs specify bits which the guest must keep fixed on
2902          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
2903          * We picked the standard core2 setting.
2904          */
2905 #define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
2906 #define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
2907         vmx->nested.nested_vmx_cr0_fixed0 = VMXON_CR0_ALWAYSON;
2908         vmx->nested.nested_vmx_cr4_fixed0 = VMXON_CR4_ALWAYSON;
2909
2910         /* These MSRs specify bits which the guest must keep fixed off. */
2911         rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx->nested.nested_vmx_cr0_fixed1);
2912         rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1);
2913
2914         /* highest index: VMX_PREEMPTION_TIMER_VALUE */
2915         vmx->nested.nested_vmx_vmcs_enum = 0x2e;
2916 }
2917
2918 /*
2919  * if fixed0[i] == 1: val[i] must be 1
2920  * if fixed1[i] == 0: val[i] must be 0
2921  */
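/*
 * For example, with fixed0 = 0x1 and fixed1 = 0x7 the values 0x1, 0x3, 0x5
 * and 0x7 are valid, while 0x9 is not (bit 3 may not be set).
 */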
2922 static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
2923 {
2924         return ((val & fixed1) | fixed0) == val;
2925 }
2926
2927 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
2928 {
2929         return fixed_bits_valid(control, low, high);
2930 }
2931
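/*
 * Pack the two halves of a VMX capability MSR: the allowed-0 ("must be 1")
 * settings in bits 31:0 and the allowed-1 settings in bits 63:32.
 */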
2932 static inline u64 vmx_control_msr(u32 low, u32 high)
2933 {
2934         return low | ((u64)high << 32);
2935 }
2936
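/*
 * Within @mask, check that @subset sets no bit that is not also set in
 * @superset, i.e. that (subset & mask) is a bitwise subset of
 * (superset & mask).
 */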
2937 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
2938 {
2939         superset &= mask;
2940         subset &= mask;
2941
2942         return (superset | subset) == superset;
2943 }
2944
2945 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
2946 {
2947         const u64 feature_and_reserved =
2948                 /* feature (except bit 48; see below) */
2949                 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
2950                 /* reserved */
2951                 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
2952         u64 vmx_basic = vmx->nested.nested_vmx_basic;
2953
2954         if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
2955                 return -EINVAL;
2956
2957         /*
2958          * KVM does not emulate a version of VMX that constrains physical
2959          * addresses of VMX structures (e.g. VMCS) to 32-bits.
2960          */
2961         if (data & BIT_ULL(48))
2962                 return -EINVAL;
2963
2964         if (vmx_basic_vmcs_revision_id(vmx_basic) !=
2965             vmx_basic_vmcs_revision_id(data))
2966                 return -EINVAL;
2967
2968         if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
2969                 return -EINVAL;
2970
2971         vmx->nested.nested_vmx_basic = data;
2972         return 0;
2973 }
2974
2975 static int
2976 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
2977 {
2978         u64 supported;
2979         u32 *lowp, *highp;
2980
2981         switch (msr_index) {
2982         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2983                 lowp = &vmx->nested.nested_vmx_pinbased_ctls_low;
2984                 highp = &vmx->nested.nested_vmx_pinbased_ctls_high;
2985                 break;
2986         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2987                 lowp = &vmx->nested.nested_vmx_procbased_ctls_low;
2988                 highp = &vmx->nested.nested_vmx_procbased_ctls_high;
2989                 break;
2990         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2991                 lowp = &vmx->nested.nested_vmx_exit_ctls_low;
2992                 highp = &vmx->nested.nested_vmx_exit_ctls_high;
2993                 break;
2994         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2995                 lowp = &vmx->nested.nested_vmx_entry_ctls_low;
2996                 highp = &vmx->nested.nested_vmx_entry_ctls_high;
2997                 break;
2998         case MSR_IA32_VMX_PROCBASED_CTLS2:
2999                 lowp = &vmx->nested.nested_vmx_secondary_ctls_low;
3000                 highp = &vmx->nested.nested_vmx_secondary_ctls_high;
3001                 break;
3002         default:
3003                 BUG();
3004         }
3005
3006         supported = vmx_control_msr(*lowp, *highp);
3007
3008         /* Check must-be-1 bits are still 1. */
3009         if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
3010                 return -EINVAL;
3011
3012         /* Check must-be-0 bits are still 0. */
3013         if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
3014                 return -EINVAL;
3015
3016         *lowp = data;
3017         *highp = data >> 32;
3018         return 0;
3019 }
3020
3021 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
3022 {
3023         const u64 feature_and_reserved_bits =
3024                 /* feature */
3025                 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
3026                 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
3027                 /* reserved */
3028                 GENMASK_ULL(13, 9) | BIT_ULL(31);
3029         u64 vmx_misc;
3030
3031         vmx_misc = vmx_control_msr(vmx->nested.nested_vmx_misc_low,
3032                                    vmx->nested.nested_vmx_misc_high);
3033
3034         if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
3035                 return -EINVAL;
3036
3037         if ((vmx->nested.nested_vmx_pinbased_ctls_high &
3038              PIN_BASED_VMX_PREEMPTION_TIMER) &&
3039             vmx_misc_preemption_timer_rate(data) !=
3040             vmx_misc_preemption_timer_rate(vmx_misc))
3041                 return -EINVAL;
3042
3043         if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
3044                 return -EINVAL;
3045
3046         if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
3047                 return -EINVAL;
3048
3049         if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
3050                 return -EINVAL;
3051
3052         vmx->nested.nested_vmx_misc_low = data;
3053         vmx->nested.nested_vmx_misc_high = data >> 32;
3054         return 0;
3055 }
3056
3057 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
3058 {
3059         u64 vmx_ept_vpid_cap;
3060
3061         vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.nested_vmx_ept_caps,
3062                                            vmx->nested.nested_vmx_vpid_caps);
3063
3064         /* Every bit is either reserved or a feature bit. */
3065         if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
3066                 return -EINVAL;
3067
3068         vmx->nested.nested_vmx_ept_caps = data;
3069         vmx->nested.nested_vmx_vpid_caps = data >> 32;
3070         return 0;
3071 }
3072
3073 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3074 {
3075         u64 *msr;
3076
3077         switch (msr_index) {
3078         case MSR_IA32_VMX_CR0_FIXED0:
3079                 msr = &vmx->nested.nested_vmx_cr0_fixed0;
3080                 break;
3081         case MSR_IA32_VMX_CR4_FIXED0:
3082                 msr = &vmx->nested.nested_vmx_cr4_fixed0;
3083                 break;
3084         default:
3085                 BUG();
3086         }
3087
3088         /*
3089  * 1 bits (which indicate bits that "must-be-1" during VMX operation)
3090          * must be 1 in the restored value.
3091          */
3092         if (!is_bitwise_subset(data, *msr, -1ULL))
3093                 return -EINVAL;
3094
3095         *msr = data;
3096         return 0;
3097 }
3098
3099 /*
3100  * Called when userspace is restoring VMX MSRs.
3101  *
3102  * Returns 0 on success, non-0 otherwise.
3103  */
3104 static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
3105 {
3106         struct vcpu_vmx *vmx = to_vmx(vcpu);
3107
3108         switch (msr_index) {
3109         case MSR_IA32_VMX_BASIC:
3110                 return vmx_restore_vmx_basic(vmx, data);
3111         case MSR_IA32_VMX_PINBASED_CTLS:
3112         case MSR_IA32_VMX_PROCBASED_CTLS:
3113         case MSR_IA32_VMX_EXIT_CTLS:
3114         case MSR_IA32_VMX_ENTRY_CTLS:
3115                 /*
3116                  * The "non-true" VMX capability MSRs are generated from the
3117                  * "true" MSRs, so we do not support restoring them directly.
3118                  *
3119                  * If userspace wants to emulate VMX_BASIC[55]=0, userspace
3120                  * should restore the "true" MSRs with the must-be-1 bits
3121                  * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
3122                  * DEFAULT SETTINGS".
3123                  */
3124                 return -EINVAL;
3125         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3126         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3127         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3128         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3129         case MSR_IA32_VMX_PROCBASED_CTLS2:
3130                 return vmx_restore_control_msr(vmx, msr_index, data);
3131         case MSR_IA32_VMX_MISC:
3132                 return vmx_restore_vmx_misc(vmx, data);
3133         case MSR_IA32_VMX_CR0_FIXED0:
3134         case MSR_IA32_VMX_CR4_FIXED0:
3135                 return vmx_restore_fixed0_msr(vmx, msr_index, data);
3136         case MSR_IA32_VMX_CR0_FIXED1:
3137         case MSR_IA32_VMX_CR4_FIXED1:
3138                 /*
3139                  * These MSRs are generated based on the vCPU's CPUID, so we
3140                  * do not support restoring them directly.
3141                  */
3142                 return -EINVAL;
3143         case MSR_IA32_VMX_EPT_VPID_CAP:
3144                 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
3145         case MSR_IA32_VMX_VMCS_ENUM:
3146                 vmx->nested.nested_vmx_vmcs_enum = data;
3147                 return 0;
3148         default:
3149                 /*
3150                  * The rest of the VMX capability MSRs do not support restore.
3151                  */
3152                 return -EINVAL;
3153         }
3154 }
3155
3156 /* Returns 0 on success, non-0 otherwise. */
3157 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
3158 {
3159         struct vcpu_vmx *vmx = to_vmx(vcpu);
3160
3161         switch (msr_index) {
3162         case MSR_IA32_VMX_BASIC:
3163                 *pdata = vmx->nested.nested_vmx_basic;
3164                 break;
3165         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3166         case MSR_IA32_VMX_PINBASED_CTLS:
3167                 *pdata = vmx_control_msr(
3168                         vmx->nested.nested_vmx_pinbased_ctls_low,
3169                         vmx->nested.nested_vmx_pinbased_ctls_high);
3170                 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
3171                         *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3172                 break;
3173         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3174         case MSR_IA32_VMX_PROCBASED_CTLS:
3175                 *pdata = vmx_control_msr(
3176                         vmx->nested.nested_vmx_procbased_ctls_low,
3177                         vmx->nested.nested_vmx_procbased_ctls_high);
3178                 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
3179                         *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3180                 break;
3181         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3182         case MSR_IA32_VMX_EXIT_CTLS:
3183                 *pdata = vmx_control_msr(
3184                         vmx->nested.nested_vmx_exit_ctls_low,
3185                         vmx->nested.nested_vmx_exit_ctls_high);
3186                 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
3187                         *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3188                 break;
3189         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3190         case MSR_IA32_VMX_ENTRY_CTLS:
3191                 *pdata = vmx_control_msr(
3192                         vmx->nested.nested_vmx_entry_ctls_low,
3193                         vmx->nested.nested_vmx_entry_ctls_high);
3194                 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
3195                         *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3196                 break;
3197         case MSR_IA32_VMX_MISC:
3198                 *pdata = vmx_control_msr(
3199                         vmx->nested.nested_vmx_misc_low,
3200                         vmx->nested.nested_vmx_misc_high);
3201                 break;
3202         case MSR_IA32_VMX_CR0_FIXED0:
3203                 *pdata = vmx->nested.nested_vmx_cr0_fixed0;
3204                 break;
3205         case MSR_IA32_VMX_CR0_FIXED1:
3206                 *pdata = vmx->nested.nested_vmx_cr0_fixed1;
3207                 break;
3208         case MSR_IA32_VMX_CR4_FIXED0:
3209                 *pdata = vmx->nested.nested_vmx_cr4_fixed0;
3210                 break;
3211         case MSR_IA32_VMX_CR4_FIXED1:
3212                 *pdata = vmx->nested.nested_vmx_cr4_fixed1;
3213                 break;
3214         case MSR_IA32_VMX_VMCS_ENUM:
3215                 *pdata = vmx->nested.nested_vmx_vmcs_enum;
3216                 break;
3217         case MSR_IA32_VMX_PROCBASED_CTLS2:
3218                 *pdata = vmx_control_msr(
3219                         vmx->nested.nested_vmx_secondary_ctls_low,
3220                         vmx->nested.nested_vmx_secondary_ctls_high);
3221                 break;
3222         case MSR_IA32_VMX_EPT_VPID_CAP:
3223                 *pdata = vmx->nested.nested_vmx_ept_caps |
3224                         ((u64)vmx->nested.nested_vmx_vpid_caps << 32);
3225                 break;
3226         case MSR_IA32_VMX_VMFUNC:
3227                 *pdata = vmx->nested.nested_vmx_vmfunc_controls;
3228                 break;
3229         default:
3230                 return 1;
3231         }
3232
3233         return 0;
3234 }
3235
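/*
 * A write to IA32_FEATURE_CONTROL is valid only if it sets no bits outside
 * of msr_ia32_feature_control_valid_bits.
 */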
3236 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
3237                                                  uint64_t val)
3238 {
3239         uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
3240
3241         return !(val & ~valid_bits);
3242 }
3243
3244 /*
3245  * Reads an MSR value (of msr_info->index) into msr_info->data.
3246  * Returns 0 on success, non-0 otherwise.
3247  * Assumes vcpu_load() was already called.
3248  */
3249 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3250 {
3251         struct shared_msr_entry *msr;
3252
3253         switch (msr_info->index) {
3254 #ifdef CONFIG_X86_64
3255         case MSR_FS_BASE:
3256                 msr_info->data = vmcs_readl(GUEST_FS_BASE);
3257                 break;
3258         case MSR_GS_BASE:
3259                 msr_info->data = vmcs_readl(GUEST_GS_BASE);
3260                 break;
3261         case MSR_KERNEL_GS_BASE:
3262                 vmx_load_host_state(to_vmx(vcpu));
3263                 msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
3264                 break;
3265 #endif
3266         case MSR_EFER:
3267                 return kvm_get_msr_common(vcpu, msr_info);
3268         case MSR_IA32_TSC:
3269                 msr_info->data = guest_read_tsc(vcpu);
3270                 break;
3271         case MSR_IA32_SYSENTER_CS:
3272                 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
3273                 break;
3274         case MSR_IA32_SYSENTER_EIP:
3275                 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
3276                 break;
3277         case MSR_IA32_SYSENTER_ESP:
3278                 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
3279                 break;
3280         case MSR_IA32_BNDCFGS:
3281                 if (!kvm_mpx_supported() ||
3282                     (!msr_info->host_initiated &&
3283                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3284                         return 1;
3285                 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
3286                 break;
3287         case MSR_IA32_MCG_EXT_CTL:
3288                 if (!msr_info->host_initiated &&
3289                     !(to_vmx(vcpu)->msr_ia32_feature_control &
3290                       FEATURE_CONTROL_LMCE))
3291                         return 1;
3292                 msr_info->data = vcpu->arch.mcg_ext_ctl;
3293                 break;
3294         case MSR_IA32_FEATURE_CONTROL:
3295                 msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
3296                 break;
3297         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3298                 if (!nested_vmx_allowed(vcpu))
3299                         return 1;
3300                 return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
3301         case MSR_IA32_XSS:
3302                 if (!vmx_xsaves_supported())
3303                         return 1;
3304                 msr_info->data = vcpu->arch.ia32_xss;
3305                 break;
3306         case MSR_TSC_AUX:
3307                 if (!msr_info->host_initiated &&
3308                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
3309                         return 1;
3310                 /* Otherwise falls through */
3311         default:
3312                 msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
3313                 if (msr) {
3314                         msr_info->data = msr->data;
3315                         break;
3316                 }
3317                 return kvm_get_msr_common(vcpu, msr_info);
3318         }
3319
3320         return 0;
3321 }
3322
3323 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
3324
3325 /*
3326  * Writes the MSR value into the appropriate "register".
3327  * Returns 0 on success, non-0 otherwise.
3328  * Assumes vcpu_load() was already called.
3329  */
3330 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3331 {
3332         struct vcpu_vmx *vmx = to_vmx(vcpu);
3333         struct shared_msr_entry *msr;
3334         int ret = 0;
3335         u32 msr_index = msr_info->index;
3336         u64 data = msr_info->data;
3337
3338         switch (msr_index) {
3339         case MSR_EFER:
3340                 ret = kvm_set_msr_common(vcpu, msr_info);
3341                 break;
3342 #ifdef CONFIG_X86_64
3343         case MSR_FS_BASE:
3344                 vmx_segment_cache_clear(vmx);
3345                 vmcs_writel(GUEST_FS_BASE, data);
3346                 break;
3347         case MSR_GS_BASE:
3348                 vmx_segment_cache_clear(vmx);
3349                 vmcs_writel(GUEST_GS_BASE, data);
3350                 break;
3351         case MSR_KERNEL_GS_BASE:
3352                 vmx_load_host_state(vmx);
3353                 vmx->msr_guest_kernel_gs_base = data;
3354                 break;
3355 #endif
3356         case MSR_IA32_SYSENTER_CS:
3357                 vmcs_write32(GUEST_SYSENTER_CS, data);
3358                 break;
3359         case MSR_IA32_SYSENTER_EIP:
3360                 vmcs_writel(GUEST_SYSENTER_EIP, data);
3361                 break;
3362         case MSR_IA32_SYSENTER_ESP:
3363                 vmcs_writel(GUEST_SYSENTER_ESP, data);
3364                 break;
3365         case MSR_IA32_BNDCFGS:
3366                 if (!kvm_mpx_supported() ||
3367                     (!msr_info->host_initiated &&
3368                      !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3369                         return 1;
3370                 if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
3371                     (data & MSR_IA32_BNDCFGS_RSVD))
3372                         return 1;
3373                 vmcs_write64(GUEST_BNDCFGS, data);
3374                 break;
3375         case MSR_IA32_TSC:
3376                 kvm_write_tsc(vcpu, msr_info);
3377                 break;
3378         case MSR_IA32_CR_PAT:
3379                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
3380                         if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3381                                 return 1;
3382                         vmcs_write64(GUEST_IA32_PAT, data);
3383                         vcpu->arch.pat = data;
3384                         break;
3385                 }
3386                 ret = kvm_set_msr_common(vcpu, msr_info);
3387                 break;
3388         case MSR_IA32_TSC_ADJUST:
3389                 ret = kvm_set_msr_common(vcpu, msr_info);
3390                 break;
3391         case MSR_IA32_MCG_EXT_CTL:
3392                 if ((!msr_info->host_initiated &&
3393                      !(to_vmx(vcpu)->msr_ia32_feature_control &
3394                        FEATURE_CONTROL_LMCE)) ||
3395                     (data & ~MCG_EXT_CTL_LMCE_EN))
3396                         return 1;
3397                 vcpu->arch.mcg_ext_ctl = data;
3398                 break;
3399         case MSR_IA32_FEATURE_CONTROL:
3400                 if (!vmx_feature_control_msr_valid(vcpu, data) ||
3401                     (to_vmx(vcpu)->msr_ia32_feature_control &
3402                      FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
3403                         return 1;
3404                 vmx->msr_ia32_feature_control = data;
3405                 if (msr_info->host_initiated && data == 0)
3406                         vmx_leave_nested(vcpu);
3407                 break;
3408         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3409                 if (!msr_info->host_initiated)
3410                         return 1; /* they are read-only */
3411                 if (!nested_vmx_allowed(vcpu))
3412                         return 1;
3413                 return vmx_set_vmx_msr(vcpu, msr_index, data);
3414         case MSR_IA32_XSS:
3415                 if (!vmx_xsaves_supported())
3416                         return 1;
3417                 /*
3418                  * The only supported bit as of Skylake is bit 8, but
3419                  * it is not supported by KVM.
3420                  */
3421                 if (data != 0)
3422                         return 1;
3423                 vcpu->arch.ia32_xss = data;
3424                 if (vcpu->arch.ia32_xss != host_xss)
3425                         add_atomic_switch_msr(vmx, MSR_IA32_XSS,
3426                                 vcpu->arch.ia32_xss, host_xss);
3427                 else
3428                         clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
3429                 break;
3430         case MSR_TSC_AUX:
3431                 if (!msr_info->host_initiated &&
3432                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
3433                         return 1;
3434                 /* Check reserved bits: the upper 32 bits must be zero */
3435                 if ((data >> 32) != 0)
3436                         return 1;
3437                 /* Otherwise falls through */
3438         default:
3439                 msr = find_msr_entry(vmx, msr_index);
3440                 if (msr) {
3441                         u64 old_msr_data = msr->data;
3442                         msr->data = data;
3443                         if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
3444                                 preempt_disable();
3445                                 ret = kvm_set_shared_msr(msr->index, msr->data,
3446                                                          msr->mask);
3447                                 preempt_enable();
3448                                 if (ret)
3449                                         msr->data = old_msr_data;
3450                         }
3451                         break;
3452                 }
3453                 ret = kvm_set_msr_common(vcpu, msr_info);
3454         }
3455
3456         return ret;
3457 }
3458
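/*
 * Read a register that lives in the VMCS (RSP, RIP, or the PDPTRs when EPT
 * is in use) into the software register cache and mark it available.
 */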
3459 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
3460 {
3461         __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
3462         switch (reg) {
3463         case VCPU_REGS_RSP:
3464                 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
3465                 break;
3466         case VCPU_REGS_RIP:
3467                 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
3468                 break;
3469         case VCPU_EXREG_PDPTR:
3470                 if (enable_ept)
3471                         ept_save_pdptrs(vcpu);
3472                 break;
3473         default:
3474                 break;
3475         }
3476 }
3477
3478 static __init int cpu_has_kvm_support(void)
3479 {
3480         return cpu_has_vmx();
3481 }
3482
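/*
 * Returns 1 if the BIOS has locked IA32_FEATURE_CONTROL with VMXON
 * disabled for the launch mode (tboot/TXT or not) we are running in.
 */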
3483 static __init int vmx_disabled_by_bios(void)
3484 {
3485         u64 msr;
3486
3487         rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
3488         if (msr & FEATURE_CONTROL_LOCKED) {
3489                 /* launched w/ TXT and VMX disabled */
3490                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
3491                         && tboot_enabled())
3492                         return 1;
3493                 /* launched w/o TXT and VMX only enabled w/ TXT */
3494                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
3495                         && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
3496                         && !tboot_enabled()) {
3497                         printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
3498                                 "activate TXT before enabling KVM\n");
3499                         return 1;
3500                 }
3501                 /* launched w/o TXT and VMX disabled */
3502                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
3503                         && !tboot_enabled())
3504                         return 1;
3505         }
3506
3507         return 0;
3508 }
3509
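/*
 * Enter VMX operation on this CPU: set CR4.VMXE, notify Intel PT that VMX
 * is in use, then execute VMXON on the given VMXON region.
 */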
3510 static void kvm_cpu_vmxon(u64 addr)
3511 {
3512         cr4_set_bits(X86_CR4_VMXE);
3513         intel_pt_handle_vmx(1);
3514
3515         asm volatile (ASM_VMX_VMXON_RAX
3516                         : : "a"(&addr), "m"(addr)
3517                         : "memory", "cc");
3518 }
3519
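/*
 * Per-cpu VMX enable: refuse if VMX is already on, set up the per-cpu
 * VMCS bookkeeping, make sure IA32_FEATURE_CONTROL enables (and locks)
 * VMXON for the current mode, then execute VMXON.
 */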
3520 static int hardware_enable(void)
3521 {
3522         int cpu = raw_smp_processor_id();
3523         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
3524         u64 old, test_bits;
3525
3526         if (cr4_read_shadow() & X86_CR4_VMXE)
3527                 return -EBUSY;
3528
3529         INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
3530         INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
3531         spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
3532
3533         /*
3534          * Now we can enable the vmclear operation in kdump
3535          * since the loaded_vmcss_on_cpu list on this cpu
3536          * has been initialized.
3537          *
3538          * Though the cpu is not in VMX operation now, there
3539          * is no problem enabling the vmclear operation, since
3540          * the loaded_vmcss_on_cpu list is empty.
3541          */
3542         crash_enable_local_vmclear(cpu);
3543
3544         rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
3545
3546         test_bits = FEATURE_CONTROL_LOCKED;
3547         test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
3548         if (tboot_enabled())
3549                 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
3550
3551         if ((old & test_bits) != test_bits) {
3552                 /* enable and lock */
3553                 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
3554         }
3555         kvm_cpu_vmxon(phys_addr);
3556         if (enable_ept)
3557                 ept_sync_global();
3558
3559         return 0;
3560 }
3561
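/* VMCLEAR every VMCS that is currently loaded on this CPU. */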
3562 static void vmclear_local_loaded_vmcss(void)
3563 {
3564         int cpu = raw_smp_processor_id();
3565         struct loaded_vmcs *v, *n;
3566
3567         list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
3568                                  loaded_vmcss_on_cpu_link)
3569                 __loaded_vmcs_clear(v);
3570 }
3571
3572
3573 /*
3574  * Just like cpu_vmxoff(), but with __kvm_handle_fault_on_reboot() tricks.
3575  */
3576 static void kvm_cpu_vmxoff(void)
3577 {
3578         asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
3579
3580         intel_pt_handle_vmx(0);
3581         cr4_clear_bits(X86_CR4_VMXE);
3582 }
3583
3584 static void hardware_disable(void)
3585 {
3586         vmclear_local_loaded_vmcss();
3587         kvm_cpu_vmxoff();
3588 }
3589
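/*
 * Combine the desired control bits with what the capability MSR allows:
 * bits that are 1 in the low word must be set, bits that are 0 in the
 * high word must stay clear.  Fail if a required (ctl_min) bit cannot
 * be set.
 */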
3590 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
3591                                       u32 msr, u32 *result)
3592 {
3593         u32 vmx_msr_low, vmx_msr_high;
3594         u32 ctl = ctl_min | ctl_opt;
3595
3596         rdmsr(msr, vmx_msr_low, vmx_msr_high);
3597
3598         ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
3599         ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
3600
3601         /* Ensure the minimum (required) set of control bits is supported. */
3602         if (ctl_min & ~ctl)
3603                 return -EIO;
3604
3605         *result = ctl;
3606         return 0;
3607 }
3608
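/* Returns true if the capability MSR allows @ctl to be set to 1. */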
3609 static __init bool allow_1_setting(u32 msr, u32 ctl)
3610 {
3611         u32 vmx_msr_low, vmx_msr_high;
3612
3613         rdmsr(msr, vmx_msr_low, vmx_msr_high);
3614         return vmx_msr_high & ctl;
3615 }
3616
3617 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
3618 {
3619         u32 vmx_msr_low, vmx_msr_high;
3620         u32 min, opt, min2, opt2;
3621         u32 _pin_based_exec_control = 0;
3622         u32 _cpu_based_exec_control = 0;
3623         u32 _cpu_based_2nd_exec_control = 0;
3624         u32 _vmexit_control = 0;
3625         u32 _vmentry_control = 0;
3626
3627         min = CPU_BASED_HLT_EXITING |
3628 #ifdef CONFIG_X86_64
3629               CPU_BASED_CR8_LOAD_EXITING |
3630               CPU_BASED_CR8_STORE_EXITING |
3631 #endif
3632               CPU_BASED_CR3_LOAD_EXITING |
3633               CPU_BASED_CR3_STORE_EXITING |
3634               CPU_BASED_USE_IO_BITMAPS |
3635               CPU_BASED_MOV_DR_EXITING |
3636               CPU_BASED_USE_TSC_OFFSETING |
3637               CPU_BASED_INVLPG_EXITING |
3638               CPU_BASED_RDPMC_EXITING;
3639
3640         if (!kvm_mwait_in_guest())
3641                 min |= CPU_BASED_MWAIT_EXITING |
3642                         CPU_BASED_MONITOR_EXITING;
3643
3644         opt = CPU_BASED_TPR_SHADOW |
3645               CPU_BASED_USE_MSR_BITMAPS |
3646               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
3647         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
3648                                 &_cpu_based_exec_control) < 0)
3649                 return -EIO;
3650 #ifdef CONFIG_X86_64
3651         if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
3652                 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
3653                                            ~CPU_BASED_CR8_STORE_EXITING;
3654 #endif
3655         if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
3656                 min2 = 0;
3657                 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3658                         SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3659                         SECONDARY_EXEC_WBINVD_EXITING |
3660                         SECONDARY_EXEC_ENABLE_VPID |
3661                         SECONDARY_EXEC_ENABLE_EPT |
3662                         SECONDARY_EXEC_UNRESTRICTED_GUEST |
3663                         SECONDARY_EXEC_PAUSE_LOOP_EXITING |
3664                         SECONDARY_EXEC_RDTSCP |
3665                         SECONDARY_EXEC_ENABLE_INVPCID |
3666                         SECONDARY_EXEC_APIC_REGISTER_VIRT |
3667                         SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3668                         SECONDARY_EXEC_SHADOW_VMCS |
3669                         SECONDARY_EXEC_XSAVES |
3670                         SECONDARY_EXEC_RDSEED_EXITING |
3671                         SECONDARY_EXEC_RDRAND_EXITING |
3672                         SECONDARY_EXEC_ENABLE_PML |
3673                         SECONDARY_EXEC_TSC_SCALING |
3674                         SECONDARY_EXEC_ENABLE_VMFUNC;
3675                 if (adjust_vmx_controls(min2, opt2,
3676                                         MSR_IA32_VMX_PROCBASED_CTLS2,
3677                                         &_cpu_based_2nd_exec_control) < 0)
3678                         return -EIO;
3679         }
3680 #ifndef CONFIG_X86_64
3681         if (!(_cpu_based_2nd_exec_control &
3682                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
3683                 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
3684 #endif
3685
3686         if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
3687                 _cpu_based_2nd_exec_control &= ~(
3688                                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
3689                                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3690                                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
3691
3692         rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
3693                 &vmx_capability.ept, &vmx_capability.vpid);
3694
3695         if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
3696                 /* CR3 accesses and invlpg don't need to cause VM Exits
3697                    when EPT is enabled. */
3698                 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
3699                                              CPU_BASED_CR3_STORE_EXITING |
3700                                              CPU_BASED_INVLPG_EXITING);
3701         } else if (vmx_capability.ept) {
3702                 vmx_capability.ept = 0;
3703                 pr_warn_once("EPT capabilities should not exist when the "
3704                                 "enable-EPT VM-execution control is not supported\n");
3705         }
3706         if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
3707                 vmx_capability.vpid) {
3708                 vmx_capability.vpid = 0;
3709                 pr_warn_once("VPID capabilities should not exist when the "
3710                                 "enable-VPID VM-execution control is not supported\n");
3711         }
3712
3713         min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
3714 #ifdef CONFIG_X86_64
3715         min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
3716 #endif
3717         opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
3718                 VM_EXIT_CLEAR_BNDCFGS;
3719         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
3720                                 &_vmexit_control) < 0)
3721                 return -EIO;
3722
3723         min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
3724         opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
3725                  PIN_BASED_VMX_PREEMPTION_TIMER;
3726         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
3727                                 &_pin_based_exec_control) < 0)
3728                 return -EIO;
3729
3730         if (cpu_has_broken_vmx_preemption_timer())
3731                 _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
3732         if (!(_cpu_based_2nd_exec_control &
3733                 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
3734                 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
3735
3736         min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
3737         opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
3738         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
3739                                 &_vmentry_control) < 0)
3740                 return -EIO;
3741
3742         rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
3743
3744         /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
3745         if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
3746                 return -EIO;
3747
3748 #ifdef CONFIG_X86_64
3749         /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
3750         if (vmx_msr_high & (1u<<16))
3751                 return -EIO;
3752 #endif
3753
3754         /* Require Write-Back (WB) memory type for VMCS accesses. */
3755         if (((vmx_msr_high >> 18) & 15) != 6)
3756                 return -EIO;
3757
3758         vmcs_conf->size = vmx_msr_high & 0x1fff;
3759         vmcs_conf->order = get_order(vmcs_conf->size);
3760         vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
3761         vmcs_conf->revision_id = vmx_msr_low;
3762
3763         vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
3764         vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
3765         vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
3766         vmcs_conf->vmexit_ctrl         = _vmexit_control;
3767         vmcs_conf->vmentry_ctrl        = _vmentry_control;
3768
3769         cpu_has_load_ia32_efer =
3770                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
3771                                 VM_ENTRY_LOAD_IA32_EFER)
3772                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
3773                                    VM_EXIT_LOAD_IA32_EFER);
3774
3775         cpu_has_load_perf_global_ctrl =
3776                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
3777                                 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
3778                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
3779                                    VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
3780
3781         /*
3782          * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL,
3783          * but due to the errata below it can't be used. The workaround is
3784          * to use the MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
3785          *
3786          * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
3787          *
3788          * AAK155             (model 26)
3789          * AAP115             (model 30)
3790          * AAT100             (model 37)
3791          * BC86,AAY89,BD102   (model 44)
3792          * BA97               (model 46)
3793          *
3794          */
3795         if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
3796                 switch (boot_cpu_data.x86_model) {
3797                 case 26:
3798                 case 30:
3799                 case 37:
3800                 case 44:
3801                 case 46:
3802                         cpu_has_load_perf_global_ctrl = false;
3803                         printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
3804                                         "does not work properly. Using workaround\n");
3805                         break;
3806                 default:
3807                         break;
3808                 }
3809         }
3810
3811         if (boot_cpu_has(X86_FEATURE_XSAVES))
3812                 rdmsrl(MSR_IA32_XSS, host_xss);
3813
3814         return 0;
3815 }
3816
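/*
 * Allocate a zeroed VMCS region on the node of the given CPU and stamp it
 * with the VMCS revision identifier from vmcs_config.
 */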
3817 static struct vmcs *alloc_vmcs_cpu(int cpu)
3818 {
3819         int node = cpu_to_node(cpu);
3820         struct page *pages;
3821         struct vmcs *vmcs;
3822
3823         pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
3824         if (!pages)
3825                 return NULL;
3826         vmcs = page_address(pages);
3827         memset(vmcs, 0, vmcs_config.size);
3828         vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
3829         return vmcs;
3830 }
3831
3832 static void free_vmcs(struct vmcs *vmcs)
3833 {
3834         free_pages((unsigned long)vmcs, vmcs_config.order);
3835 }
3836
3837 /*
3838  * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
3839  */
3840 static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
3841 {
3842         if (!loaded_vmcs->vmcs)
3843                 return;
3844         loaded_vmcs_clear(loaded_vmcs);
3845         free_vmcs(loaded_vmcs->vmcs);
3846         loaded_vmcs->vmcs = NULL;
3847         WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
3848 }
3849
3850 static struct vmcs *alloc_vmcs(void)
3851 {
3852         return alloc_vmcs_cpu(raw_smp_processor_id());
3853 }
3854
3855 static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
3856 {
3857         loaded_vmcs->vmcs = alloc_vmcs();
3858         if (!loaded_vmcs->vmcs)
3859                 return -ENOMEM;
3860
3861         loaded_vmcs->shadow_vmcs = NULL;
3862         loaded_vmcs_init(loaded_vmcs);
3863         return 0;
3864 }
3865
3866 static void free_kvm_area(void)
3867 {
3868         int cpu;
3869
3870         for_each_possible_cpu(cpu) {
3871                 free_vmcs(per_cpu(vmxarea, cpu));
3872                 per_cpu(vmxarea, cpu) = NULL;
3873         }
3874 }
3875
3876 enum vmcs_field_type {
3877         VMCS_FIELD_TYPE_U16 = 0,
3878         VMCS_FIELD_TYPE_U64 = 1,
3879         VMCS_FIELD_TYPE_U32 = 2,
3880         VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
3881 };
3882
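/*
 * Bits 14:13 of a VMCS field encoding give the field width; encodings with
 * bit 0 set are the high 32 bits of a 64-bit field and are accessed as u32.
 */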
3883 static inline int vmcs_field_type(unsigned long field)
3884 {
3885         if (0x1 & field)        /* the *_HIGH fields are all 32 bit */
3886                 return VMCS_FIELD_TYPE_U32;
3887         return (field >> 13) & 0x3;
3888 }
3889
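/* Field type 1 (bits 11:10 of the encoding) marks a read-only data field. */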
3890 static inline int vmcs_field_readonly(unsigned long field)
3891 {
3892         return (((field >> 10) & 0x3) == 1);
3893 }
3894
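/*
 * Drop shadowed fields that this host cannot use, then clear their bits in
 * the VMREAD/VMWRITE bitmaps so the guest can access them without a VM exit.
 */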
3895 static void init_vmcs_shadow_fields(void)
3896 {
3897         int i, j;
3898
3899         /* No checks for read-only fields yet */
3900
3901         for (i = j = 0; i < max_shadow_read_write_fields; i++) {
3902                 switch (shadow_read_write_fields[i]) {
3903                 case GUEST_BNDCFGS:
3904                         if (!kvm_mpx_supported())
3905                                 continue;
3906                         break;
3907                 default:
3908                         break;
3909                 }
3910
3911                 if (j < i)
3912                         shadow_read_write_fields[j] =
3913                                 shadow_read_write_fields[i];
3914                 j++;
3915         }
3916         max_shadow_read_write_fields = j;
3917
3918         /* Shadowed fields are accessed by the guest without a VM exit. */
3919         for (i = 0; i < max_shadow_read_write_fields; i++) {
3920                 unsigned long field = shadow_read_write_fields[i];
3921
3922                 clear_bit(field, vmx_vmwrite_bitmap);
3923                 clear_bit(field, vmx_vmread_bitmap);
3924                 if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64) {
3925                         clear_bit(field + 1, vmx_vmwrite_bitmap);
3926                         clear_bit(field + 1, vmx_vmread_bitmap);
3927                 }
3928         }
3929         for (i = 0; i < max_shadow_read_only_fields; i++) {
3930                 unsigned long field = shadow_read_only_fields[i];
3931
3932                 clear_bit(field, vmx_vmread_bitmap);
3933                 if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64)
3934                         clear_bit(field + 1, vmx_vmread_bitmap);
3935