2 * Kernel-based Virtual Machine driver for Linux
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11 * Avi Kivity <avi@qumranet.com>
12 * Yaniv Kamay <yaniv@qumranet.com>
14 * This work is licensed under the terms of the GNU GPL, version 2. See
15 * the COPYING file in the top-level directory.
24 #include <linux/kvm_host.h>
25 #include <linux/module.h>
26 #include <linux/kernel.h>
28 #include <linux/highmem.h>
29 #include <linux/sched.h>
30 #include <linux/moduleparam.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/tboot.h>
35 #include <linux/hrtimer.h>
36 #include <linux/frame.h>
37 #include "kvm_cache_regs.h"
44 #include <asm/virtext.h>
46 #include <asm/fpu/internal.h>
47 #include <asm/perf_event.h>
48 #include <asm/debugreg.h>
49 #include <asm/kexec.h>
51 #include <asm/irq_remapping.h>
52 #include <asm/mmu_context.h>
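/*
 * The two macros below wrap a VMX instruction with the kexec/reboot fault
 * handler: if the instruction faults because VMX has already been disabled
 * (e.g. during an emergency reboot), the fault is swallowed instead of
 * crashing the host.  __ex_clear() additionally zeroes the named register
 * on the fault path so callers see a benign value.
 */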
57 #define __ex(x) __kvm_handle_fault_on_reboot(x)
58 #define __ex_clear(x, reg) \
59 ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
61 MODULE_AUTHOR("Qumranet");
62 MODULE_LICENSE("GPL");
64 static const struct x86_cpu_id vmx_cpu_id[] = {
65 X86_FEATURE_MATCH(X86_FEATURE_VMX),
68 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
70 static bool __read_mostly enable_vpid = 1;
71 module_param_named(vpid, enable_vpid, bool, 0444);
73 static bool __read_mostly flexpriority_enabled = 1;
74 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
76 static bool __read_mostly enable_ept = 1;
77 module_param_named(ept, enable_ept, bool, S_IRUGO);
79 static bool __read_mostly enable_unrestricted_guest = 1;
80 module_param_named(unrestricted_guest,
81 enable_unrestricted_guest, bool, S_IRUGO);
83 static bool __read_mostly enable_ept_ad_bits = 1;
84 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
86 static bool __read_mostly emulate_invalid_guest_state = true;
87 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
89 static bool __read_mostly fasteoi = 1;
90 module_param(fasteoi, bool, S_IRUGO);
92 static bool __read_mostly enable_apicv = 1;
93 module_param(enable_apicv, bool, S_IRUGO);
95 static bool __read_mostly enable_shadow_vmcs = 1;
96 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
98 * If nested=1, nested virtualization is supported, i.e., guests may use
99 * VMX and act as a hypervisor for their own guests. If nested=0, guests may not
100 * use VMX instructions.
102 static bool __read_mostly nested = 0;
103 module_param(nested, bool, S_IRUGO);
105 static u64 __read_mostly host_xss;
107 static bool __read_mostly enable_pml = 1;
108 module_param_named(pml, enable_pml, bool, S_IRUGO);
110 #define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
112 /* Guest_tsc -> host_tsc conversion requires 64-bit division. */
113 static int __read_mostly cpu_preemption_timer_multi;
114 static bool __read_mostly enable_preemption_timer = 1;
116 module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
119 #define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
120 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
121 #define KVM_VM_CR0_ALWAYS_ON \
122 (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
123 #define KVM_CR4_GUEST_OWNED_BITS \
124 (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
125 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
127 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
128 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
130 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
132 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
135 * Hyper-V requires all of these, so mark them as supported even though
136 * they are just treated the same as all-context.
138 #define VMX_VPID_EXTENT_SUPPORTED_MASK \
139 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
140 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
141 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
142 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
145 * These two parameters are used to configure the controls for Pause-Loop Exiting:
146 * ple_gap: upper bound on the amount of time between two successive
147 * executions of PAUSE in a loop. Also indicates whether PLE is enabled.
148 * According to tests, this time is usually smaller than 128 cycles.
149 * ple_window: upper bound on the amount of time a guest is allowed to execute
150 * in a PAUSE loop. Tests indicate that most spinlocks are held for
151 * less than 2^12 cycles.
152 * Time is measured on a counter that runs at the same rate as the TSC;
153 * refer to SDM volume 3B, sections 21.6.13 & 22.1.3.
155 #define KVM_VMX_DEFAULT_PLE_GAP 128
156 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096
157 #define KVM_VMX_DEFAULT_PLE_WINDOW_GROW 2
158 #define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0
159 #define KVM_VMX_DEFAULT_PLE_WINDOW_MAX \
160 INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW
162 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
163 module_param(ple_gap, int, S_IRUGO);
165 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
166 module_param(ple_window, int, S_IRUGO);
168 /* Default doubles per-vcpu window every exit. */
169 static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
170 module_param(ple_window_grow, int, S_IRUGO);
172 /* Default resets per-vcpu window every exit to ple_window. */
173 static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
174 module_param(ple_window_shrink, int, S_IRUGO);
176 /* Default is to compute the maximum so we can never overflow. */
177 static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
178 static int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
179 module_param(ple_window_max, int, S_IRUGO);
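/*
 * Illustrative sketch (an assumption, not necessarily the exact grow/shrink
 * helpers used elsewhere) of how the parameters above can drive the dynamic
 * per-vcpu PLE window:
 *
 *	on a PLE exit:  new = min(old * ple_window_grow, ple_window_max);
 *	on shrink:      new = ple_window_shrink ? old / ple_window_shrink
 *	                                        : ple_window;
 *
 * ple_window_actual_max exists to bound growth so the multiplication above
 * cannot overflow an int.
 */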
181 extern const ulong vmx_return;
183 #define NR_AUTOLOAD_MSRS 8
184 #define VMCS02_POOL_SIZE 1
193 * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
194 * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
195 * loaded on this CPU (so we can clear them if the CPU goes down).
199 struct vmcs *shadow_vmcs;
202 bool nmi_known_unmasked;
203 unsigned long vmcs_host_cr3; /* May not match real cr3 */
204 unsigned long vmcs_host_cr4; /* May not match real cr4 */
205 struct list_head loaded_vmcss_on_cpu_link;
208 struct shared_msr_entry {
215 * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
216 * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
217 * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
218 * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
219 * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
220 * More than one of these structures may exist, if L1 runs multiple L2 guests.
221 * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
222 * underlying hardware which will be used to run L2.
223 * This structure is packed to ensure that its layout is identical across
224 * machines (necessary for live migration).
225 * If there are changes in this struct, VMCS12_REVISION must be changed.
227 typedef u64 natural_width;
228 struct __packed vmcs12 {
229 /* According to the Intel spec, a VMCS region must start with the
230 * following two fields. Then follow implementation-specific data.
235 u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
236 u32 padding[7]; /* room for future expansion */
241 u64 vm_exit_msr_store_addr;
242 u64 vm_exit_msr_load_addr;
243 u64 vm_entry_msr_load_addr;
245 u64 virtual_apic_page_addr;
246 u64 apic_access_addr;
247 u64 posted_intr_desc_addr;
248 u64 vm_function_control;
250 u64 eoi_exit_bitmap0;
251 u64 eoi_exit_bitmap1;
252 u64 eoi_exit_bitmap2;
253 u64 eoi_exit_bitmap3;
254 u64 eptp_list_address;
256 u64 guest_physical_address;
257 u64 vmcs_link_pointer;
259 u64 guest_ia32_debugctl;
262 u64 guest_ia32_perf_global_ctrl;
270 u64 host_ia32_perf_global_ctrl;
271 u64 padding64[8]; /* room for future expansion */
273 * To allow migration of L1 (complete with its L2 guests) between
274 * machines of different natural widths (32 or 64 bit), we cannot have
275 * unsigned long fields with no explicit size. We use u64 (aliased
276 * natural_width) instead. Luckily, x86 is little-endian.
278 natural_width cr0_guest_host_mask;
279 natural_width cr4_guest_host_mask;
280 natural_width cr0_read_shadow;
281 natural_width cr4_read_shadow;
282 natural_width cr3_target_value0;
283 natural_width cr3_target_value1;
284 natural_width cr3_target_value2;
285 natural_width cr3_target_value3;
286 natural_width exit_qualification;
287 natural_width guest_linear_address;
288 natural_width guest_cr0;
289 natural_width guest_cr3;
290 natural_width guest_cr4;
291 natural_width guest_es_base;
292 natural_width guest_cs_base;
293 natural_width guest_ss_base;
294 natural_width guest_ds_base;
295 natural_width guest_fs_base;
296 natural_width guest_gs_base;
297 natural_width guest_ldtr_base;
298 natural_width guest_tr_base;
299 natural_width guest_gdtr_base;
300 natural_width guest_idtr_base;
301 natural_width guest_dr7;
302 natural_width guest_rsp;
303 natural_width guest_rip;
304 natural_width guest_rflags;
305 natural_width guest_pending_dbg_exceptions;
306 natural_width guest_sysenter_esp;
307 natural_width guest_sysenter_eip;
308 natural_width host_cr0;
309 natural_width host_cr3;
310 natural_width host_cr4;
311 natural_width host_fs_base;
312 natural_width host_gs_base;
313 natural_width host_tr_base;
314 natural_width host_gdtr_base;
315 natural_width host_idtr_base;
316 natural_width host_ia32_sysenter_esp;
317 natural_width host_ia32_sysenter_eip;
318 natural_width host_rsp;
319 natural_width host_rip;
320 natural_width paddingl[8]; /* room for future expansion */
321 u32 pin_based_vm_exec_control;
322 u32 cpu_based_vm_exec_control;
323 u32 exception_bitmap;
324 u32 page_fault_error_code_mask;
325 u32 page_fault_error_code_match;
326 u32 cr3_target_count;
327 u32 vm_exit_controls;
328 u32 vm_exit_msr_store_count;
329 u32 vm_exit_msr_load_count;
330 u32 vm_entry_controls;
331 u32 vm_entry_msr_load_count;
332 u32 vm_entry_intr_info_field;
333 u32 vm_entry_exception_error_code;
334 u32 vm_entry_instruction_len;
336 u32 secondary_vm_exec_control;
337 u32 vm_instruction_error;
339 u32 vm_exit_intr_info;
340 u32 vm_exit_intr_error_code;
341 u32 idt_vectoring_info_field;
342 u32 idt_vectoring_error_code;
343 u32 vm_exit_instruction_len;
344 u32 vmx_instruction_info;
351 u32 guest_ldtr_limit;
353 u32 guest_gdtr_limit;
354 u32 guest_idtr_limit;
355 u32 guest_es_ar_bytes;
356 u32 guest_cs_ar_bytes;
357 u32 guest_ss_ar_bytes;
358 u32 guest_ds_ar_bytes;
359 u32 guest_fs_ar_bytes;
360 u32 guest_gs_ar_bytes;
361 u32 guest_ldtr_ar_bytes;
362 u32 guest_tr_ar_bytes;
363 u32 guest_interruptibility_info;
364 u32 guest_activity_state;
365 u32 guest_sysenter_cs;
366 u32 host_ia32_sysenter_cs;
367 u32 vmx_preemption_timer_value;
368 u32 padding32[7]; /* room for future expansion */
369 u16 virtual_processor_id;
371 u16 guest_es_selector;
372 u16 guest_cs_selector;
373 u16 guest_ss_selector;
374 u16 guest_ds_selector;
375 u16 guest_fs_selector;
376 u16 guest_gs_selector;
377 u16 guest_ldtr_selector;
378 u16 guest_tr_selector;
379 u16 guest_intr_status;
381 u16 host_es_selector;
382 u16 host_cs_selector;
383 u16 host_ss_selector;
384 u16 host_ds_selector;
385 u16 host_fs_selector;
386 u16 host_gs_selector;
387 u16 host_tr_selector;
391 * VMCS12_REVISION is an arbitrary id that should be changed if the content or
392 * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
393 * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
395 #define VMCS12_REVISION 0x11e57ed0
398 * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
399 * and any VMCS region. Although only sizeof(struct vmcs12) is used by the
400 * current implementation, 4K is reserved to avoid future complications.
402 #define VMCS12_SIZE 0x1000
404 /* Used to remember the last vmcs02 used for some recently used vmcs12s */
406 struct list_head list;
408 struct loaded_vmcs vmcs02;
412 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
413 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
416 /* Has the level-1 (L1) guest done VMXON? */
421 /* The guest-physical address of the current VMCS L1 keeps for L2 */
424 * Cache of the guest's VMCS, existing outside of guest memory.
425 * Loaded from guest memory during VMPTRLD. Flushed to guest
426 * memory during VMCLEAR and VMPTRLD.
428 struct vmcs12 *cached_vmcs12;
430 * Indicates if the shadow vmcs must be updated with the
431 * data held by vmcs12
433 bool sync_shadow_vmcs;
435 /* vmcs02_list cache of VMCSs recently used to run L2 guests */
436 struct list_head vmcs02_pool;
438 bool change_vmcs01_virtual_x2apic_mode;
439 /* L2 must run next, and mustn't decide to exit to L1. */
440 bool nested_run_pending;
442 * Guest pages referred to in vmcs02 with host-physical pointers, so
443 * we must keep them pinned while L2 runs.
445 struct page *apic_access_page;
446 struct page *virtual_apic_page;
447 struct page *pi_desc_page;
448 struct pi_desc *pi_desc;
452 unsigned long *msr_bitmap;
454 struct hrtimer preemption_timer;
455 bool preemption_timer_expired;
457 /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
464 * We only store the "true" versions of the VMX capability MSRs. We
465 * generate the "non-true" versions by setting the must-be-1 bits
466 * according to the SDM.
468 u32 nested_vmx_procbased_ctls_low;
469 u32 nested_vmx_procbased_ctls_high;
470 u32 nested_vmx_secondary_ctls_low;
471 u32 nested_vmx_secondary_ctls_high;
472 u32 nested_vmx_pinbased_ctls_low;
473 u32 nested_vmx_pinbased_ctls_high;
474 u32 nested_vmx_exit_ctls_low;
475 u32 nested_vmx_exit_ctls_high;
476 u32 nested_vmx_entry_ctls_low;
477 u32 nested_vmx_entry_ctls_high;
478 u32 nested_vmx_misc_low;
479 u32 nested_vmx_misc_high;
480 u32 nested_vmx_ept_caps;
481 u32 nested_vmx_vpid_caps;
482 u64 nested_vmx_basic;
483 u64 nested_vmx_cr0_fixed0;
484 u64 nested_vmx_cr0_fixed1;
485 u64 nested_vmx_cr4_fixed0;
486 u64 nested_vmx_cr4_fixed1;
487 u64 nested_vmx_vmcs_enum;
488 u64 nested_vmx_vmfunc_controls;
491 #define POSTED_INTR_ON 0
492 #define POSTED_INTR_SN 1
494 /* Posted-Interrupt Descriptor */
496 u32 pir[8]; /* Posted interrupt requested */
499 /* bit 256 - Outstanding Notification */
501 /* bit 257 - Suppress Notification */
503 /* bit 271:258 - Reserved */
505 /* bit 279:272 - Notification Vector */
507 /* bit 287:280 - Reserved */
509 /* bit 319:288 - Notification Destination */
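/*
 * The helpers below manipulate the ON (outstanding notification) and SN
 * (suppress notification) bits of the descriptor's control word with atomic
 * bitops, so software updates stay coherent with hardware writing the same
 * descriptor.
 */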
517 static bool pi_test_and_set_on(struct pi_desc *pi_desc)
519 return test_and_set_bit(POSTED_INTR_ON,
520 (unsigned long *)&pi_desc->control);
523 static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
525 return test_and_clear_bit(POSTED_INTR_ON,
526 (unsigned long *)&pi_desc->control);
529 static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
531 return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
534 static inline void pi_clear_sn(struct pi_desc *pi_desc)
536 return clear_bit(POSTED_INTR_SN,
537 (unsigned long *)&pi_desc->control);
540 static inline void pi_set_sn(struct pi_desc *pi_desc)
542 return set_bit(POSTED_INTR_SN,
543 (unsigned long *)&pi_desc->control);
546 static inline void pi_clear_on(struct pi_desc *pi_desc)
548 clear_bit(POSTED_INTR_ON,
549 (unsigned long *)&pi_desc->control);
552 static inline int pi_test_on(struct pi_desc *pi_desc)
554 return test_bit(POSTED_INTR_ON,
555 (unsigned long *)&pi_desc->control);
558 static inline int pi_test_sn(struct pi_desc *pi_desc)
560 return test_bit(POSTED_INTR_SN,
561 (unsigned long *)&pi_desc->control);
565 struct kvm_vcpu vcpu;
566 unsigned long host_rsp;
569 u32 idt_vectoring_info;
571 struct shared_msr_entry *guest_msrs;
574 unsigned long host_idt_base;
576 u64 msr_host_kernel_gs_base;
577 u64 msr_guest_kernel_gs_base;
579 u32 vm_entry_controls_shadow;
580 u32 vm_exit_controls_shadow;
581 u32 secondary_exec_control;
584 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
585 * non-nested (L1) guest, it always points to vmcs01. For a nested
586 * guest (L2), it points to a different VMCS.
588 struct loaded_vmcs vmcs01;
589 struct loaded_vmcs *loaded_vmcs;
590 bool __launched; /* temporary, used in vmx_vcpu_run */
591 struct msr_autoload {
593 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
594 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
598 u16 fs_sel, gs_sel, ldt_sel;
602 int gs_ldt_reload_needed;
603 int fs_reload_needed;
604 u64 msr_host_bndcfgs;
609 struct kvm_segment segs[8];
612 u32 bitmask; /* 4 bits per segment (1 bit per field) */
613 struct kvm_save_segment {
621 bool emulation_required;
625 /* Posted interrupt descriptor */
626 struct pi_desc pi_desc;
628 /* Support for a guest hypervisor (nested VMX) */
629 struct nested_vmx nested;
631 /* Dynamic PLE window. */
633 bool ple_window_dirty;
635 /* Support for PML */
636 #define PML_ENTITY_NUM 512
639 /* apic deadline value in host tsc */
642 u64 current_tsc_ratio;
647 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
648 * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
649 * in msr_ia32_feature_control_valid_bits.
651 u64 msr_ia32_feature_control;
652 u64 msr_ia32_feature_control_valid_bits;
655 enum segment_cache_field {
664 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
666 return container_of(vcpu, struct vcpu_vmx, vcpu);
669 static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
671 return &(to_vmx(vcpu)->pi_desc);
674 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
675 #define FIELD(number, name) [number] = VMCS12_OFFSET(name)
676 #define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \
677 [number##_HIGH] = VMCS12_OFFSET(name)+4
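/*
 * FIELD()/FIELD64() map a hardware VMCS field encoding to the offset of the
 * matching struct vmcs12 member; FIELD64() additionally maps the *_HIGH
 * alias of a 64-bit field to offset + 4 so that 32-bit accesses to the upper
 * half land correctly.  They are used to build vmcs_field_to_offset_table
 * below.
 */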
680 static unsigned long shadow_read_only_fields[] = {
682 * We do NOT shadow fields that are modified when L0
683 * traps and emulates any vmx instruction (e.g. VMPTRLD,
684 * VMXON...) executed by L1.
685 * For example, VM_INSTRUCTION_ERROR is read
686 * by L1 if a vmx instruction fails (part of the error path).
687 * Note that the code relies on this behavior. If for some reason
688 * we start shadowing these fields then we need to
689 * force a shadow sync when L0 emulates vmx instructions
690 * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
691 * by nested_vmx_failValid)
695 VM_EXIT_INSTRUCTION_LEN,
696 IDT_VECTORING_INFO_FIELD,
697 IDT_VECTORING_ERROR_CODE,
698 VM_EXIT_INTR_ERROR_CODE,
700 GUEST_LINEAR_ADDRESS,
701 GUEST_PHYSICAL_ADDRESS
703 static int max_shadow_read_only_fields =
704 ARRAY_SIZE(shadow_read_only_fields);
706 static unsigned long shadow_read_write_fields[] = {
713 GUEST_INTERRUPTIBILITY_INFO,
726 CPU_BASED_VM_EXEC_CONTROL,
727 VM_ENTRY_EXCEPTION_ERROR_CODE,
728 VM_ENTRY_INTR_INFO_FIELD,
729 VM_ENTRY_INSTRUCTION_LEN,
736 static int max_shadow_read_write_fields =
737 ARRAY_SIZE(shadow_read_write_fields);
739 static const unsigned short vmcs_field_to_offset_table[] = {
740 FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
741 FIELD(POSTED_INTR_NV, posted_intr_nv),
742 FIELD(GUEST_ES_SELECTOR, guest_es_selector),
743 FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
744 FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
745 FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
746 FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
747 FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
748 FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
749 FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
750 FIELD(GUEST_INTR_STATUS, guest_intr_status),
751 FIELD(GUEST_PML_INDEX, guest_pml_index),
752 FIELD(HOST_ES_SELECTOR, host_es_selector),
753 FIELD(HOST_CS_SELECTOR, host_cs_selector),
754 FIELD(HOST_SS_SELECTOR, host_ss_selector),
755 FIELD(HOST_DS_SELECTOR, host_ds_selector),
756 FIELD(HOST_FS_SELECTOR, host_fs_selector),
757 FIELD(HOST_GS_SELECTOR, host_gs_selector),
758 FIELD(HOST_TR_SELECTOR, host_tr_selector),
759 FIELD64(IO_BITMAP_A, io_bitmap_a),
760 FIELD64(IO_BITMAP_B, io_bitmap_b),
761 FIELD64(MSR_BITMAP, msr_bitmap),
762 FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
763 FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
764 FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
765 FIELD64(TSC_OFFSET, tsc_offset),
766 FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
767 FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
768 FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
769 FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
770 FIELD64(EPT_POINTER, ept_pointer),
771 FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
772 FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
773 FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
774 FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
775 FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
776 FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
777 FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
778 FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
779 FIELD64(PML_ADDRESS, pml_address),
780 FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
781 FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
782 FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
783 FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
784 FIELD64(GUEST_PDPTR0, guest_pdptr0),
785 FIELD64(GUEST_PDPTR1, guest_pdptr1),
786 FIELD64(GUEST_PDPTR2, guest_pdptr2),
787 FIELD64(GUEST_PDPTR3, guest_pdptr3),
788 FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
789 FIELD64(HOST_IA32_PAT, host_ia32_pat),
790 FIELD64(HOST_IA32_EFER, host_ia32_efer),
791 FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
792 FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
793 FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
794 FIELD(EXCEPTION_BITMAP, exception_bitmap),
795 FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
796 FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
797 FIELD(CR3_TARGET_COUNT, cr3_target_count),
798 FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
799 FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
800 FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
801 FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
802 FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
803 FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
804 FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
805 FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
806 FIELD(TPR_THRESHOLD, tpr_threshold),
807 FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
808 FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
809 FIELD(VM_EXIT_REASON, vm_exit_reason),
810 FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
811 FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
812 FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
813 FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
814 FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
815 FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
816 FIELD(GUEST_ES_LIMIT, guest_es_limit),
817 FIELD(GUEST_CS_LIMIT, guest_cs_limit),
818 FIELD(GUEST_SS_LIMIT, guest_ss_limit),
819 FIELD(GUEST_DS_LIMIT, guest_ds_limit),
820 FIELD(GUEST_FS_LIMIT, guest_fs_limit),
821 FIELD(GUEST_GS_LIMIT, guest_gs_limit),
822 FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
823 FIELD(GUEST_TR_LIMIT, guest_tr_limit),
824 FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
825 FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
826 FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
827 FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
828 FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
829 FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
830 FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
831 FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
832 FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
833 FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
834 FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
835 FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
836 FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
837 FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
838 FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
839 FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
840 FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
841 FIELD(CR0_READ_SHADOW, cr0_read_shadow),
842 FIELD(CR4_READ_SHADOW, cr4_read_shadow),
843 FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
844 FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
845 FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
846 FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
847 FIELD(EXIT_QUALIFICATION, exit_qualification),
848 FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
849 FIELD(GUEST_CR0, guest_cr0),
850 FIELD(GUEST_CR3, guest_cr3),
851 FIELD(GUEST_CR4, guest_cr4),
852 FIELD(GUEST_ES_BASE, guest_es_base),
853 FIELD(GUEST_CS_BASE, guest_cs_base),
854 FIELD(GUEST_SS_BASE, guest_ss_base),
855 FIELD(GUEST_DS_BASE, guest_ds_base),
856 FIELD(GUEST_FS_BASE, guest_fs_base),
857 FIELD(GUEST_GS_BASE, guest_gs_base),
858 FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
859 FIELD(GUEST_TR_BASE, guest_tr_base),
860 FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
861 FIELD(GUEST_IDTR_BASE, guest_idtr_base),
862 FIELD(GUEST_DR7, guest_dr7),
863 FIELD(GUEST_RSP, guest_rsp),
864 FIELD(GUEST_RIP, guest_rip),
865 FIELD(GUEST_RFLAGS, guest_rflags),
866 FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
867 FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
868 FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
869 FIELD(HOST_CR0, host_cr0),
870 FIELD(HOST_CR3, host_cr3),
871 FIELD(HOST_CR4, host_cr4),
872 FIELD(HOST_FS_BASE, host_fs_base),
873 FIELD(HOST_GS_BASE, host_gs_base),
874 FIELD(HOST_TR_BASE, host_tr_base),
875 FIELD(HOST_GDTR_BASE, host_gdtr_base),
876 FIELD(HOST_IDTR_BASE, host_idtr_base),
877 FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
878 FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
879 FIELD(HOST_RSP, host_rsp),
880 FIELD(HOST_RIP, host_rip),
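/*
 * vmcs_field_to_offset() translates a VMCS field encoding into the byte
 * offset of the corresponding struct vmcs12 member, rejecting encodings that
 * fall outside the table or have no mapping (offset 0).
 */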
883 static inline short vmcs_field_to_offset(unsigned long field)
885 BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
887 if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
888 vmcs_field_to_offset_table[field] == 0)
891 return vmcs_field_to_offset_table[field];
894 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
896 return to_vmx(vcpu)->nested.cached_vmcs12;
899 static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
900 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
901 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
902 static bool vmx_xsaves_supported(void);
903 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
904 static void vmx_set_segment(struct kvm_vcpu *vcpu,
905 struct kvm_segment *var, int seg);
906 static void vmx_get_segment(struct kvm_vcpu *vcpu,
907 struct kvm_segment *var, int seg);
908 static bool guest_state_valid(struct kvm_vcpu *vcpu);
909 static u32 vmx_segment_access_rights(struct kvm_segment *var);
910 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
911 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
912 static int alloc_identity_pagetable(struct kvm *kvm);
913 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
914 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
915 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
918 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
919 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
921 * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is needed
922 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
924 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
927 * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
928 * can find which vCPU should be woken up.
930 static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
931 static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
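/*
 * Global permission bitmaps (I/O, MSR, VMREAD/VMWRITE) shared by all guests;
 * the enum below indexes vmx_bitmap[] and the #defines give each slot a
 * readable name.
 */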
936 VMX_MSR_BITMAP_LEGACY,
937 VMX_MSR_BITMAP_LONGMODE,
938 VMX_MSR_BITMAP_LEGACY_X2APIC_APICV,
939 VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV,
940 VMX_MSR_BITMAP_LEGACY_X2APIC,
941 VMX_MSR_BITMAP_LONGMODE_X2APIC,
947 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
949 #define vmx_io_bitmap_a (vmx_bitmap[VMX_IO_BITMAP_A])
950 #define vmx_io_bitmap_b (vmx_bitmap[VMX_IO_BITMAP_B])
951 #define vmx_msr_bitmap_legacy (vmx_bitmap[VMX_MSR_BITMAP_LEGACY])
952 #define vmx_msr_bitmap_longmode (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE])
953 #define vmx_msr_bitmap_legacy_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC_APICV])
954 #define vmx_msr_bitmap_longmode_x2apic_apicv (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC_APICV])
955 #define vmx_msr_bitmap_legacy_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LEGACY_X2APIC])
956 #define vmx_msr_bitmap_longmode_x2apic (vmx_bitmap[VMX_MSR_BITMAP_LONGMODE_X2APIC])
957 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
958 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
960 static bool cpu_has_load_ia32_efer;
961 static bool cpu_has_load_perf_global_ctrl;
963 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
964 static DEFINE_SPINLOCK(vmx_vpid_lock);
966 static struct vmcs_config {
971 u32 pin_based_exec_ctrl;
972 u32 cpu_based_exec_ctrl;
973 u32 cpu_based_2nd_exec_ctrl;
978 static struct vmx_capability {
983 #define VMX_SEGMENT_FIELD(seg) \
984 [VCPU_SREG_##seg] = { \
985 .selector = GUEST_##seg##_SELECTOR, \
986 .base = GUEST_##seg##_BASE, \
987 .limit = GUEST_##seg##_LIMIT, \
988 .ar_bytes = GUEST_##seg##_AR_BYTES, \
991 static const struct kvm_vmx_segment_field {
996 } kvm_vmx_segment_fields[] = {
997 VMX_SEGMENT_FIELD(CS),
998 VMX_SEGMENT_FIELD(DS),
999 VMX_SEGMENT_FIELD(ES),
1000 VMX_SEGMENT_FIELD(FS),
1001 VMX_SEGMENT_FIELD(GS),
1002 VMX_SEGMENT_FIELD(SS),
1003 VMX_SEGMENT_FIELD(TR),
1004 VMX_SEGMENT_FIELD(LDTR),
1007 static u64 host_efer;
1009 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
1012 * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
1013 * away by decrementing the array size.
1015 static const u32 vmx_msr_index[] = {
1016 #ifdef CONFIG_X86_64
1017 MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
1019 MSR_EFER, MSR_TSC_AUX, MSR_STAR,
1022 static inline bool is_exception_n(u32 intr_info, u8 vector)
1024 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1025 INTR_INFO_VALID_MASK)) ==
1026 (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
1029 static inline bool is_debug(u32 intr_info)
1031 return is_exception_n(intr_info, DB_VECTOR);
1034 static inline bool is_breakpoint(u32 intr_info)
1036 return is_exception_n(intr_info, BP_VECTOR);
1039 static inline bool is_page_fault(u32 intr_info)
1041 return is_exception_n(intr_info, PF_VECTOR);
1044 static inline bool is_no_device(u32 intr_info)
1046 return is_exception_n(intr_info, NM_VECTOR);
1049 static inline bool is_invalid_opcode(u32 intr_info)
1051 return is_exception_n(intr_info, UD_VECTOR);
1054 static inline bool is_external_interrupt(u32 intr_info)
1056 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1057 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
1060 static inline bool is_machine_check(u32 intr_info)
1062 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
1063 INTR_INFO_VALID_MASK)) ==
1064 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
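/*
 * The cpu_has_vmx_*() helpers below report which optional VMX controls the
 * host supports, based on the vmcs_config and vmx_capability snapshots
 * populated during hardware setup.
 */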
1067 static inline bool cpu_has_vmx_msr_bitmap(void)
1069 return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
1072 static inline bool cpu_has_vmx_tpr_shadow(void)
1074 return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
1077 static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
1079 return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
1082 static inline bool cpu_has_secondary_exec_ctrls(void)
1084 return vmcs_config.cpu_based_exec_ctrl &
1085 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1088 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
1090 return vmcs_config.cpu_based_2nd_exec_ctrl &
1091 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
1094 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
1096 return vmcs_config.cpu_based_2nd_exec_ctrl &
1097 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
1100 static inline bool cpu_has_vmx_apic_register_virt(void)
1102 return vmcs_config.cpu_based_2nd_exec_ctrl &
1103 SECONDARY_EXEC_APIC_REGISTER_VIRT;
1106 static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1108 return vmcs_config.cpu_based_2nd_exec_ctrl &
1109 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1113 * Comment format: document - errata name - stepping - processor name.
1115 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
1117 static u32 vmx_preemption_cpu_tfms[] = {
1118 /* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
1120 /* 323056.pdf - AAX65 - C2 - Xeon L3406 */
1121 /* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
1122 /* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
1124 /* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
1126 /* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */
1127 /* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */
1129 * 320767.pdf - AAP86 - B1 -
1130 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
1133 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
1135 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
1137 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
1139 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
1140 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
1141 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
1145 static inline bool cpu_has_broken_vmx_preemption_timer(void)
1147 u32 eax = cpuid_eax(0x00000001), i;
1149 /* Clear the reserved bits (31:28 and 15:14) of the CPUID signature */
1150 eax &= ~(0x3U << 14 | 0xfU << 28);
1151 for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
1152 if (eax == vmx_preemption_cpu_tfms[i])
1158 static inline bool cpu_has_vmx_preemption_timer(void)
1160 return vmcs_config.pin_based_exec_ctrl &
1161 PIN_BASED_VMX_PREEMPTION_TIMER;
1164 static inline bool cpu_has_vmx_posted_intr(void)
1166 return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
1167 vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
1170 static inline bool cpu_has_vmx_apicv(void)
1172 return cpu_has_vmx_apic_register_virt() &&
1173 cpu_has_vmx_virtual_intr_delivery() &&
1174 cpu_has_vmx_posted_intr();
1177 static inline bool cpu_has_vmx_flexpriority(void)
1179 return cpu_has_vmx_tpr_shadow() &&
1180 cpu_has_vmx_virtualize_apic_accesses();
1183 static inline bool cpu_has_vmx_ept_execute_only(void)
1185 return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
1188 static inline bool cpu_has_vmx_ept_2m_page(void)
1190 return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
1193 static inline bool cpu_has_vmx_ept_1g_page(void)
1195 return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
1198 static inline bool cpu_has_vmx_ept_4levels(void)
1200 return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
1203 static inline bool cpu_has_vmx_ept_mt_wb(void)
1205 return vmx_capability.ept & VMX_EPTP_WB_BIT;
1208 static inline bool cpu_has_vmx_ept_5levels(void)
1210 return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
1213 static inline bool cpu_has_vmx_ept_ad_bits(void)
1215 return vmx_capability.ept & VMX_EPT_AD_BIT;
1218 static inline bool cpu_has_vmx_invept_context(void)
1220 return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
1223 static inline bool cpu_has_vmx_invept_global(void)
1225 return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
1228 static inline bool cpu_has_vmx_invvpid_single(void)
1230 return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
1233 static inline bool cpu_has_vmx_invvpid_global(void)
1235 return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
1238 static inline bool cpu_has_vmx_invvpid(void)
1240 return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
1243 static inline bool cpu_has_vmx_ept(void)
1245 return vmcs_config.cpu_based_2nd_exec_ctrl &
1246 SECONDARY_EXEC_ENABLE_EPT;
1249 static inline bool cpu_has_vmx_unrestricted_guest(void)
1251 return vmcs_config.cpu_based_2nd_exec_ctrl &
1252 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1255 static inline bool cpu_has_vmx_ple(void)
1257 return vmcs_config.cpu_based_2nd_exec_ctrl &
1258 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
1261 static inline bool cpu_has_vmx_basic_inout(void)
1263 return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
1266 static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
1268 return flexpriority_enabled && lapic_in_kernel(vcpu);
1271 static inline bool cpu_has_vmx_vpid(void)
1273 return vmcs_config.cpu_based_2nd_exec_ctrl &
1274 SECONDARY_EXEC_ENABLE_VPID;
1277 static inline bool cpu_has_vmx_rdtscp(void)
1279 return vmcs_config.cpu_based_2nd_exec_ctrl &
1280 SECONDARY_EXEC_RDTSCP;
1283 static inline bool cpu_has_vmx_invpcid(void)
1285 return vmcs_config.cpu_based_2nd_exec_ctrl &
1286 SECONDARY_EXEC_ENABLE_INVPCID;
1289 static inline bool cpu_has_vmx_wbinvd_exit(void)
1291 return vmcs_config.cpu_based_2nd_exec_ctrl &
1292 SECONDARY_EXEC_WBINVD_EXITING;
1295 static inline bool cpu_has_vmx_shadow_vmcs(void)
1298 rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
1299 /* check if the cpu supports writing r/o exit information fields */
1300 if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
1303 return vmcs_config.cpu_based_2nd_exec_ctrl &
1304 SECONDARY_EXEC_SHADOW_VMCS;
1307 static inline bool cpu_has_vmx_pml(void)
1309 return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
1312 static inline bool cpu_has_vmx_tsc_scaling(void)
1314 return vmcs_config.cpu_based_2nd_exec_ctrl &
1315 SECONDARY_EXEC_TSC_SCALING;
1318 static inline bool cpu_has_vmx_vmfunc(void)
1320 return vmcs_config.cpu_based_2nd_exec_ctrl &
1321 SECONDARY_EXEC_ENABLE_VMFUNC;
1324 static inline bool report_flexpriority(void)
1326 return flexpriority_enabled;
1329 static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
1331 return vmx_misc_cr3_count(to_vmx(vcpu)->nested.nested_vmx_misc_low);
1334 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
1336 return vmcs12->cpu_based_vm_exec_control & bit;
1339 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1341 return (vmcs12->cpu_based_vm_exec_control &
1342 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
1343 (vmcs12->secondary_vm_exec_control & bit);
1346 static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1348 return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1351 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
1353 return vmcs12->pin_based_vm_exec_control &
1354 PIN_BASED_VMX_PREEMPTION_TIMER;
1357 static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1359 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1362 static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
1364 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
1367 static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
1369 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
1372 static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
1374 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
1377 static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
1379 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
1382 static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
1384 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
1387 static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
1389 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
1392 static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
1394 return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
1397 static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
1399 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
1402 static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
1404 return nested_cpu_has_vmfunc(vmcs12) &&
1405 (vmcs12->vm_function_control &
1406 VMX_VMFUNC_EPTP_SWITCHING);
1409 static inline bool is_nmi(u32 intr_info)
1411 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
1412 == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
1415 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1417 unsigned long exit_qualification);
1418 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1419 struct vmcs12 *vmcs12,
1420 u32 reason, unsigned long qualification);
1422 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
1426 for (i = 0; i < vmx->nmsrs; ++i)
1427 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
1432 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
1438 } operand = { vpid, 0, gva };
1440 asm volatile (__ex(ASM_VMX_INVVPID)
1441 /* CF==1 or ZF==1 --> rc = -1 */
1442 "; ja 1f ; ud2 ; 1:"
1443 : : "a"(&operand), "c"(ext) : "cc", "memory");
1446 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
1450 } operand = {eptp, gpa};
1452 asm volatile (__ex(ASM_VMX_INVEPT)
1453 /* CF==1 or ZF==1 --> rc = -1 */
1454 "; ja 1f ; ud2 ; 1:\n"
1455 : : "a" (&operand), "c" (ext) : "cc", "memory");
1458 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
1462 i = __find_msr_index(vmx, msr);
1464 return &vmx->guest_msrs[i];
1468 static void vmcs_clear(struct vmcs *vmcs)
1470 u64 phys_addr = __pa(vmcs);
1473 asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
1474 : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1477 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
1481 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
1483 vmcs_clear(loaded_vmcs->vmcs);
1484 if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
1485 vmcs_clear(loaded_vmcs->shadow_vmcs);
1486 loaded_vmcs->cpu = -1;
1487 loaded_vmcs->launched = 0;
1490 static void vmcs_load(struct vmcs *vmcs)
1492 u64 phys_addr = __pa(vmcs);
1495 asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
1496 : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1499 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
1503 #ifdef CONFIG_KEXEC_CORE
1505 * This bitmap is used to indicate whether the vmclear
1506 * operation is enabled on all cpus. All disabled by default.
1509 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
1511 static inline void crash_enable_local_vmclear(int cpu)
1513 cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
1516 static inline void crash_disable_local_vmclear(int cpu)
1518 cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
1521 static inline int crash_local_vmclear_enabled(int cpu)
1523 return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
1526 static void crash_vmclear_local_loaded_vmcss(void)
1528 int cpu = raw_smp_processor_id();
1529 struct loaded_vmcs *v;
1531 if (!crash_local_vmclear_enabled(cpu))
1534 list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
1535 loaded_vmcss_on_cpu_link)
1536 vmcs_clear(v->vmcs);
1539 static inline void crash_enable_local_vmclear(int cpu) { }
1540 static inline void crash_disable_local_vmclear(int cpu) { }
1541 #endif /* CONFIG_KEXEC_CORE */
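/*
 * A VMCS can only be VMCLEARed on the CPU where it is currently active, so
 * loaded_vmcs_clear() uses smp_call_function_single() to run
 * __loaded_vmcs_clear() on the CPU the VMCS was last loaded on.
 */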
1543 static void __loaded_vmcs_clear(void *arg)
1545 struct loaded_vmcs *loaded_vmcs = arg;
1546 int cpu = raw_smp_processor_id();
1548 if (loaded_vmcs->cpu != cpu)
1549 return; /* vcpu migration can race with cpu offline */
1550 if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
1551 per_cpu(current_vmcs, cpu) = NULL;
1552 crash_disable_local_vmclear(cpu);
1553 list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
1556 * We should ensure that updating loaded_vmcs->loaded_vmcss_on_cpu_link
1557 * happens before setting loaded_vmcs->cpu to -1, which is done in
1558 * loaded_vmcs_init. Otherwise, another CPU can see cpu == -1 first
1559 * and then add the vmcs to its per-cpu list before it is deleted.
1563 loaded_vmcs_init(loaded_vmcs);
1564 crash_enable_local_vmclear(cpu);
1567 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1569 int cpu = loaded_vmcs->cpu;
1572 smp_call_function_single(cpu,
1573 __loaded_vmcs_clear, loaded_vmcs, 1);
1576 static inline void vpid_sync_vcpu_single(int vpid)
1581 if (cpu_has_vmx_invvpid_single())
1582 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
1585 static inline void vpid_sync_vcpu_global(void)
1587 if (cpu_has_vmx_invvpid_global())
1588 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
1591 static inline void vpid_sync_context(int vpid)
1593 if (cpu_has_vmx_invvpid_single())
1594 vpid_sync_vcpu_single(vpid);
1596 vpid_sync_vcpu_global();
1599 static inline void ept_sync_global(void)
1601 if (cpu_has_vmx_invept_global())
1602 __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
1605 static inline void ept_sync_context(u64 eptp)
1608 if (cpu_has_vmx_invept_context())
1609 __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
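/*
 * Bits 14:13 of a VMCS field encoding give its width (0 = 16-bit,
 * 1 = 64-bit, 2 = 32-bit, 3 = natural width) and bit 0 selects the high half
 * of a 64-bit field.  The vmcs_check*() helpers below use those bits to flag
 * accessor/field width mismatches at build time for constant field
 * arguments.
 */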
1615 static __always_inline void vmcs_check16(unsigned long field)
1617 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1618 "16-bit accessor invalid for 64-bit field");
1619 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1620 "16-bit accessor invalid for 64-bit high field");
1621 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1622 "16-bit accessor invalid for 32-bit high field");
1623 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1624 "16-bit accessor invalid for natural width field");
1627 static __always_inline void vmcs_check32(unsigned long field)
1629 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1630 "32-bit accessor invalid for 16-bit field");
1631 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1632 "32-bit accessor invalid for natural width field");
1635 static __always_inline void vmcs_check64(unsigned long field)
1637 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1638 "64-bit accessor invalid for 16-bit field");
1639 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1640 "64-bit accessor invalid for 64-bit high field");
1641 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1642 "64-bit accessor invalid for 32-bit field");
1643 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
1644 "64-bit accessor invalid for natural width field");
1647 static __always_inline void vmcs_checkl(unsigned long field)
1649 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
1650 "Natural width accessor invalid for 16-bit field");
1651 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
1652 "Natural width accessor invalid for 64-bit field");
1653 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
1654 "Natural width accessor invalid for 64-bit high field");
1655 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
1656 "Natural width accessor invalid for 32-bit field");
1659 static __always_inline unsigned long __vmcs_readl(unsigned long field)
1661 unsigned long value;
1663 asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
1664 : "=a"(value) : "d"(field) : "cc");
1668 static __always_inline u16 vmcs_read16(unsigned long field)
1670 vmcs_check16(field);
1671 return __vmcs_readl(field);
1674 static __always_inline u32 vmcs_read32(unsigned long field)
1676 vmcs_check32(field);
1677 return __vmcs_readl(field);
1680 static __always_inline u64 vmcs_read64(unsigned long field)
1682 vmcs_check64(field);
1683 #ifdef CONFIG_X86_64
1684 return __vmcs_readl(field);
1686 return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
1690 static __always_inline unsigned long vmcs_readl(unsigned long field)
1693 return __vmcs_readl(field);
1696 static noinline void vmwrite_error(unsigned long field, unsigned long value)
1698 printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
1699 field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
1703 static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
1707 asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
1708 : "=q"(error) : "a"(value), "d"(field) : "cc");
1709 if (unlikely(error))
1710 vmwrite_error(field, value);
1713 static __always_inline void vmcs_write16(unsigned long field, u16 value)
1715 vmcs_check16(field);
1716 __vmcs_writel(field, value);
1719 static __always_inline void vmcs_write32(unsigned long field, u32 value)
1721 vmcs_check32(field);
1722 __vmcs_writel(field, value);
1725 static __always_inline void vmcs_write64(unsigned long field, u64 value)
1727 vmcs_check64(field);
1728 __vmcs_writel(field, value);
1729 #ifndef CONFIG_X86_64
1731 __vmcs_writel(field+1, value >> 32);
1735 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
1738 __vmcs_writel(field, value);
1741 static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
1743 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1744 "vmcs_clear_bits does not support 64-bit fields");
1745 __vmcs_writel(field, __vmcs_readl(field) & ~mask);
1748 static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
1750 BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
1751 "vmcs_set_bits does not support 64-bit fields");
1752 __vmcs_writel(field, __vmcs_readl(field) | mask);
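/*
 * VM_ENTRY_CONTROLS and VM_EXIT_CONTROLS are shadowed in vcpu_vmx so that
 * toggling individual bits does not require a VMREAD; the *_set() helpers
 * only write the VMCS when the cached value actually changes.
 */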
1755 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
1757 vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
1760 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
1762 vmcs_write32(VM_ENTRY_CONTROLS, val);
1763 vmx->vm_entry_controls_shadow = val;
1766 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
1768 if (vmx->vm_entry_controls_shadow != val)
1769 vm_entry_controls_init(vmx, val);
1772 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
1774 return vmx->vm_entry_controls_shadow;
1778 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1780 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
1783 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1785 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
1788 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
1790 vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
1793 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
1795 vmcs_write32(VM_EXIT_CONTROLS, val);
1796 vmx->vm_exit_controls_shadow = val;
1799 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
1801 if (vmx->vm_exit_controls_shadow != val)
1802 vm_exit_controls_init(vmx, val);
1805 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
1807 return vmx->vm_exit_controls_shadow;
1811 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
1813 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
1816 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
1818 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
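/*
 * The segment cache avoids repeated VMREADs of guest segment state: each
 * segment has one valid bit per field (selector, base, limit, AR) in
 * segment_cache.bitmask, checked and set by vmx_segment_cache_test_set();
 * clearing the bitmask invalidates the whole cache.
 */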
1821 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
1823 vmx->segment_cache.bitmask = 0;
1826 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
1830 u32 mask = 1 << (seg * SEG_FIELD_NR + field);
1832 if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
1833 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
1834 vmx->segment_cache.bitmask = 0;
1836 ret = vmx->segment_cache.bitmask & mask;
1837 vmx->segment_cache.bitmask |= mask;
1841 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
1843 u16 *p = &vmx->segment_cache.seg[seg].selector;
1845 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
1846 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
1850 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
1852 ulong *p = &vmx->segment_cache.seg[seg].base;
1854 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
1855 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
1859 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
1861 u32 *p = &vmx->segment_cache.seg[seg].limit;
1863 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
1864 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
1868 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
1870 u32 *p = &vmx->segment_cache.seg[seg].ar;
1872 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
1873 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
1877 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1881 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1882 (1u << DB_VECTOR) | (1u << AC_VECTOR);
1883 if ((vcpu->guest_debug &
1884 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
1885 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
1886 eb |= 1u << BP_VECTOR;
1887 if (to_vmx(vcpu)->rmode.vm86_active)
1890 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
1892 /* When we are running a nested L2 guest and L1 specified for it a
1893 * certain exception bitmap, we must trap the same exceptions and pass
1894 * them to L1. When running L2, we will only handle the exceptions
1895 * specified above if L1 did not want them.
1897 if (is_guest_mode(vcpu))
1898 eb |= get_vmcs12(vcpu)->exception_bitmap;
1900 vmcs_write32(EXCEPTION_BITMAP, eb);
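/*
 * MSRs whose values differ between guest and host are switched in one of two
 * ways: via dedicated VM-entry/VM-exit controls (EFER and PERF_GLOBAL_CTRL,
 * when the CPU supports them), or via the VM-entry/VM-exit MSR load lists in
 * msr_autoload, which hold at most NR_AUTOLOAD_MSRS entries.  The helpers
 * below add and remove entries from either mechanism.
 */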
1903 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1904 unsigned long entry, unsigned long exit)
1906 vm_entry_controls_clearbit(vmx, entry);
1907 vm_exit_controls_clearbit(vmx, exit);
1910 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1913 struct msr_autoload *m = &vmx->msr_autoload;
1917 if (cpu_has_load_ia32_efer) {
1918 clear_atomic_switch_msr_special(vmx,
1919 VM_ENTRY_LOAD_IA32_EFER,
1920 VM_EXIT_LOAD_IA32_EFER);
1924 case MSR_CORE_PERF_GLOBAL_CTRL:
1925 if (cpu_has_load_perf_global_ctrl) {
1926 clear_atomic_switch_msr_special(vmx,
1927 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1928 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1934 for (i = 0; i < m->nr; ++i)
1935 if (m->guest[i].index == msr)
1941 m->guest[i] = m->guest[m->nr];
1942 m->host[i] = m->host[m->nr];
1943 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1944 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1947 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1948 unsigned long entry, unsigned long exit,
1949 unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
1950 u64 guest_val, u64 host_val)
1952 vmcs_write64(guest_val_vmcs, guest_val);
1953 vmcs_write64(host_val_vmcs, host_val);
1954 vm_entry_controls_setbit(vmx, entry);
1955 vm_exit_controls_setbit(vmx, exit);
1958 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1959 u64 guest_val, u64 host_val)
1962 struct msr_autoload *m = &vmx->msr_autoload;
1966 if (cpu_has_load_ia32_efer) {
1967 add_atomic_switch_msr_special(vmx,
1968 VM_ENTRY_LOAD_IA32_EFER,
1969 VM_EXIT_LOAD_IA32_EFER,
1972 guest_val, host_val);
1976 case MSR_CORE_PERF_GLOBAL_CTRL:
1977 if (cpu_has_load_perf_global_ctrl) {
1978 add_atomic_switch_msr_special(vmx,
1979 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1980 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1981 GUEST_IA32_PERF_GLOBAL_CTRL,
1982 HOST_IA32_PERF_GLOBAL_CTRL,
1983 guest_val, host_val);
1987 case MSR_IA32_PEBS_ENABLE:
1988 /* PEBS needs a quiescent period after being disabled (to write
1989 * a record). Disabling PEBS through VMX MSR swapping doesn't
1990 * provide that period, so a CPU could write the host's record into guest memory.
1993 wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
1996 for (i = 0; i < m->nr; ++i)
1997 if (m->guest[i].index == msr)
2000 if (i == NR_AUTOLOAD_MSRS) {
2001 printk_once(KERN_WARNING "Not enough msr switch entries. "
2002 "Can't add msr %x\n", msr);
2004 } else if (i == m->nr) {
2006 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
2007 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
2010 m->guest[i].index = msr;
2011 m->guest[i].value = guest_val;
2012 m->host[i].index = msr;
2013 m->host[i].value = host_val;
2016 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
2018 u64 guest_efer = vmx->vcpu.arch.efer;
2019 u64 ignore_bits = 0;
2023 * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
2024 * host CPUID is more efficient than testing guest CPUID
2025 * or CR4. Host SMEP is anyway a requirement for guest SMEP.
2027 if (boot_cpu_has(X86_FEATURE_SMEP))
2028 guest_efer |= EFER_NX;
2029 else if (!(guest_efer & EFER_NX))
2030 ignore_bits |= EFER_NX;
2034 * LMA and LME handled by hardware; SCE meaningless outside long mode.
2036 ignore_bits |= EFER_SCE;
2037 #ifdef CONFIG_X86_64
2038 ignore_bits |= EFER_LMA | EFER_LME;
2039 /* SCE is meaningful only in long mode on Intel */
2040 if (guest_efer & EFER_LMA)
2041 ignore_bits &= ~(u64)EFER_SCE;
2044 clear_atomic_switch_msr(vmx, MSR_EFER);
2047 * On EPT, we can't emulate NX, so we must switch EFER atomically.
2048 * On CPUs that support "load IA32_EFER", always switch EFER
2049 * atomically, since it's faster than switching it manually.
2051 if (cpu_has_load_ia32_efer ||
2052 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
2053 if (!(guest_efer & EFER_LMA))
2054 guest_efer &= ~EFER_LME;
2055 if (guest_efer != host_efer)
2056 add_atomic_switch_msr(vmx, MSR_EFER,
2057 guest_efer, host_efer);
2060 guest_efer &= ~ignore_bits;
2061 guest_efer |= host_efer & ignore_bits;
2063 vmx->guest_msrs[efer_offset].data = guest_efer;
2064 vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
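/*
 * Illustrative sketch (not driver code): when EFER is switched lazily via
 * the shared-MSR machinery, bits the hardware ignores in the current mode
 * (ignore_bits) are taken from the host value, so no MSR write is needed
 * when only those bits differ.  Standalone helper with assumed names.
 */
#include <stdint.h>

static uint64_t sketch_effective_guest_efer(uint64_t guest_efer,
					    uint64_t host_efer,
					    uint64_t ignore_bits)
{
	/* Keep guest-owned bits, fold in host values for ignored bits. */
	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;
	return guest_efer;
}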
2070 #ifdef CONFIG_X86_32
2072 * On 32-bit kernels, VM exits still load the FS and GS bases from the
2073 * VMCS rather than the segment table. KVM uses this helper to figure
2074 * out the current bases to poke them into the VMCS before entry.
2076 static unsigned long segment_base(u16 selector)
2078 struct desc_struct *table;
2081 if (!(selector & ~SEGMENT_RPL_MASK))
2084 table = get_current_gdt_ro();
2086 if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2087 u16 ldt_selector = kvm_read_ldt();
2089 if (!(ldt_selector & ~SEGMENT_RPL_MASK))
2092 table = (struct desc_struct *)segment_base(ldt_selector);
2094 v = get_desc_base(&table[selector >> 3]);
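/*
 * Illustrative sketch (not driver code): a segment selector encodes the
 * requested privilege level in bits 1:0, the table indicator (GDT=0,
 * LDT=1) in bit 2 and the descriptor index in bits 15:3 -- which is why
 * segment_base() above indexes the descriptor table with "selector >> 3".
 * Standalone decoder with assumed names.
 */
#include <stdint.h>

struct sketch_selector {
	unsigned rpl;		/* bits 1:0 */
	unsigned ti;		/* bit 2: 0 = GDT, 1 = LDT */
	unsigned index;		/* bits 15:3 */
};

static struct sketch_selector sketch_decode_selector(uint16_t sel)
{
	struct sketch_selector s = {
		.rpl   = sel & 0x3,
		.ti    = (sel >> 2) & 0x1,
		.index = sel >> 3,
	};
	return s;
}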
2099 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
2101 struct vcpu_vmx *vmx = to_vmx(vcpu);
2104 if (vmx->host_state.loaded)
2107 vmx->host_state.loaded = 1;
2109 * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
2110 * allow segment selectors with cpl > 0 or ti == 1.
2112 vmx->host_state.ldt_sel = kvm_read_ldt();
2113 vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
2114 savesegment(fs, vmx->host_state.fs_sel);
2115 if (!(vmx->host_state.fs_sel & 7)) {
2116 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
2117 vmx->host_state.fs_reload_needed = 0;
2119 vmcs_write16(HOST_FS_SELECTOR, 0);
2120 vmx->host_state.fs_reload_needed = 1;
2122 savesegment(gs, vmx->host_state.gs_sel);
2123 if (!(vmx->host_state.gs_sel & 7))
2124 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
2126 vmcs_write16(HOST_GS_SELECTOR, 0);
2127 vmx->host_state.gs_ldt_reload_needed = 1;
2130 #ifdef CONFIG_X86_64
2131 savesegment(ds, vmx->host_state.ds_sel);
2132 savesegment(es, vmx->host_state.es_sel);
2135 #ifdef CONFIG_X86_64
2136 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
2137 vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
2139 vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
2140 vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
2143 #ifdef CONFIG_X86_64
2144 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2145 if (is_long_mode(&vmx->vcpu))
2146 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2148 if (boot_cpu_has(X86_FEATURE_MPX))
2149 rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2150 for (i = 0; i < vmx->save_nmsrs; ++i)
2151 kvm_set_shared_msr(vmx->guest_msrs[i].index,
2152 vmx->guest_msrs[i].data,
2153 vmx->guest_msrs[i].mask);
2156 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
2158 if (!vmx->host_state.loaded)
2161 ++vmx->vcpu.stat.host_state_reload;
2162 vmx->host_state.loaded = 0;
2163 #ifdef CONFIG_X86_64
2164 if (is_long_mode(&vmx->vcpu))
2165 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
2167 if (vmx->host_state.gs_ldt_reload_needed) {
2168 kvm_load_ldt(vmx->host_state.ldt_sel);
2169 #ifdef CONFIG_X86_64
2170 load_gs_index(vmx->host_state.gs_sel);
2172 loadsegment(gs, vmx->host_state.gs_sel);
2175 if (vmx->host_state.fs_reload_needed)
2176 loadsegment(fs, vmx->host_state.fs_sel);
2177 #ifdef CONFIG_X86_64
2178 if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
2179 loadsegment(ds, vmx->host_state.ds_sel);
2180 loadsegment(es, vmx->host_state.es_sel);
2183 invalidate_tss_limit();
2184 #ifdef CONFIG_X86_64
2185 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
2187 if (vmx->host_state.msr_host_bndcfgs)
2188 wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
2189 load_fixmap_gdt(raw_smp_processor_id());
2192 static void vmx_load_host_state(struct vcpu_vmx *vmx)
2195 __vmx_load_host_state(vmx);
2199 static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
2201 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2202 struct pi_desc old, new;
2206 * In case of hot-plug or hot-unplug, we may have to undo
2207 * vmx_vcpu_pi_put even if there is no assigned device. And we
2208 * always keep PI.NDST up to date for simplicity: it makes the
2209 * code easier, and CPU migration is not a fast path.
2211 if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
2215 * First handle the simple case where no cmpxchg is necessary; just
2216 * allow posting non-urgent interrupts.
2218 * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
2219 * PI.NDST: pi_post_block will do it for us and the wakeup_handler
2220 * expects the VCPU to be on the blocked_vcpu_list that matches
2223 if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
2225 pi_clear_sn(pi_desc);
2229 /* The full case. */
2231 old.control = new.control = pi_desc->control;
2233 dest = cpu_physical_id(cpu);
2235 if (x2apic_enabled())
2236 new.ndst = dest;
2237 else
2238 new.ndst = (dest << 8) & 0xFF00;
2241 } while (cmpxchg64(&pi_desc->control, old.control,
2242 new.control) != old.control);
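/*
 * Illustrative sketch (not driver code): PI.NDST holds the notification
 * destination of the posted-interrupt descriptor.  In xAPIC mode only the
 * 8-bit APIC ID is used and it sits in bits 15:8 of NDST, while x2APIC
 * mode stores the full 32-bit APIC ID directly -- hence the two encodings
 * chosen around the cmpxchg loop above.  Standalone helper with assumed
 * names.
 */
#include <stdint.h>
#include <stdbool.h>

static uint32_t sketch_pi_ndst(uint32_t dest_apic_id, bool x2apic)
{
	if (x2apic)
		return dest_apic_id;		/* full 32-bit ID */
	return (dest_apic_id << 8) & 0xFF00;	/* xAPIC: ID in bits 15:8 */
}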
2245 static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
2247 vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
2248 vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
2252 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
2253 * vcpu mutex is already taken.
2255 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2257 struct vcpu_vmx *vmx = to_vmx(vcpu);
2258 bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
2260 if (!already_loaded) {
2261 loaded_vmcs_clear(vmx->loaded_vmcs);
2262 local_irq_disable();
2263 crash_disable_local_vmclear(cpu);
2266 * Read loaded_vmcs->cpu should be before fetching
2267 * loaded_vmcs->loaded_vmcss_on_cpu_link.
2268 * See the comments in __loaded_vmcs_clear().
2272 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
2273 &per_cpu(loaded_vmcss_on_cpu, cpu));
2274 crash_enable_local_vmclear(cpu);
2278 if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
2279 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
2280 vmcs_load(vmx->loaded_vmcs->vmcs);
2283 if (!already_loaded) {
2284 void *gdt = get_current_gdt_ro();
2285 unsigned long sysenter_esp;
2287 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2290 * Linux uses per-cpu TSS and GDT, so set these when switching
2291 * processors. See 22.2.4.
2293 vmcs_writel(HOST_TR_BASE,
2294 (unsigned long)this_cpu_ptr(&cpu_tss));
2295 vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */
2298 * VM exits change the host TR limit to 0x67 after a VM
2299 * exit. This is okay, since 0x67 covers everything except
2300 * the IO bitmap, and we have code to handle the IO bitmap
2301 * being lost after a VM exit.
2303 BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
2305 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
2306 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
2308 vmx->loaded_vmcs->cpu = cpu;
2311 /* Setup TSC multiplier */
2312 if (kvm_has_tsc_control &&
2313 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
2314 decache_tsc_multiplier(vmx);
2316 vmx_vcpu_pi_load(vcpu, cpu);
2317 vmx->host_pkru = read_pkru();
2320 static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
2322 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
2324 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
2325 !irq_remapping_cap(IRQ_POSTING_CAP) ||
2326 !kvm_vcpu_apicv_active(vcpu))
2329 /* Set SN when the vCPU is preempted */
2330 if (vcpu->preempted)
2334 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
2336 vmx_vcpu_pi_put(vcpu);
2338 __vmx_load_host_state(to_vmx(vcpu));
2341 static bool emulation_required(struct kvm_vcpu *vcpu)
2343 return emulate_invalid_guest_state && !guest_state_valid(vcpu);
2346 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
2349 * Return the cr0 value that a nested guest would read. This is a combination
2350 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
2351 * its hypervisor (cr0_read_shadow).
2353 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
2355 return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
2356 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
2358 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
2360 return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
2361 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
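/*
 * Illustrative sketch (not driver code): for every CR0/CR4 bit owned by
 * the L1 hypervisor -- i.e. set in the guest/host mask -- the value L2
 * reads comes from the read shadow; all other bits come from the real
 * guest register.  Standalone helper mirroring nested_read_cr0() above.
 */
#include <stdint.h>

static uint64_t sketch_nested_read_cr(uint64_t guest_cr,
				      uint64_t read_shadow,
				      uint64_t guest_host_mask)
{
	return (guest_cr & ~guest_host_mask) |
	       (read_shadow & guest_host_mask);
}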
2364 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
2366 unsigned long rflags, save_rflags;
2368 if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
2369 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2370 rflags = vmcs_readl(GUEST_RFLAGS);
2371 if (to_vmx(vcpu)->rmode.vm86_active) {
2372 rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
2373 save_rflags = to_vmx(vcpu)->rmode.save_rflags;
2374 rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
2376 to_vmx(vcpu)->rflags = rflags;
2378 return to_vmx(vcpu)->rflags;
2381 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2383 unsigned long old_rflags = vmx_get_rflags(vcpu);
2385 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
2386 to_vmx(vcpu)->rflags = rflags;
2387 if (to_vmx(vcpu)->rmode.vm86_active) {
2388 to_vmx(vcpu)->rmode.save_rflags = rflags;
2389 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
2391 vmcs_writel(GUEST_RFLAGS, rflags);
2393 if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
2394 to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
2397 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
2399 u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2402 if (interruptibility & GUEST_INTR_STATE_STI)
2403 ret |= KVM_X86_SHADOW_INT_STI;
2404 if (interruptibility & GUEST_INTR_STATE_MOV_SS)
2405 ret |= KVM_X86_SHADOW_INT_MOV_SS;
2410 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
2412 u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2413 u32 interruptibility = interruptibility_old;
2415 interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
2417 if (mask & KVM_X86_SHADOW_INT_MOV_SS)
2418 interruptibility |= GUEST_INTR_STATE_MOV_SS;
2419 else if (mask & KVM_X86_SHADOW_INT_STI)
2420 interruptibility |= GUEST_INTR_STATE_STI;
2422 if ((interruptibility != interruptibility_old))
2423 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
2426 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
2430 rip = kvm_rip_read(vcpu);
2431 rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
2432 kvm_rip_write(vcpu, rip);
2434 /* skipping an emulated instruction also counts */
2435 vmx_set_interrupt_shadow(vcpu, 0);
2438 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
2439 unsigned long exit_qual)
2441 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2442 unsigned int nr = vcpu->arch.exception.nr;
2443 u32 intr_info = nr | INTR_INFO_VALID_MASK;
2445 if (vcpu->arch.exception.has_error_code) {
2446 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
2447 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2450 if (kvm_exception_is_soft(nr))
2451 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2453 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2455 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
2456 vmx_get_nmi_mask(vcpu))
2457 intr_info |= INTR_INFO_UNBLOCK_NMI;
2459 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
2463 * KVM wants to deliver the page faults it received to the guest. When running
2464 * a nested guest, this function checks whether they should be injected into L1 or L2.
2466 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
2468 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2469 unsigned int nr = vcpu->arch.exception.nr;
2471 if (nr == PF_VECTOR) {
2472 if (vcpu->arch.exception.nested_apf) {
2473 *exit_qual = vcpu->arch.apf.nested_apf_token;
2477 * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
2478 * The fix is to add the ancillary datum (CR2 or DR6) to structs
2479 * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
2480 * can be written only when inject_pending_event runs. This should be
2481 * conditional on a new capability---if the capability is disabled,
2482 * kvm_multiple_exception would write the ancillary information to
2483 * CR2 or DR6, for backwards ABI-compatibility.
2485 if (nested_vmx_is_page_fault_vmexit(vmcs12,
2486 vcpu->arch.exception.error_code)) {
2487 *exit_qual = vcpu->arch.cr2;
2491 if (vmcs12->exception_bitmap & (1u << nr)) {
2492 if (nr == DB_VECTOR)
2493 *exit_qual = vcpu->arch.dr6;
2503 static void vmx_queue_exception(struct kvm_vcpu *vcpu)
2505 struct vcpu_vmx *vmx = to_vmx(vcpu);
2506 unsigned nr = vcpu->arch.exception.nr;
2507 bool has_error_code = vcpu->arch.exception.has_error_code;
2508 u32 error_code = vcpu->arch.exception.error_code;
2509 u32 intr_info = nr | INTR_INFO_VALID_MASK;
2511 if (has_error_code) {
2512 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
2513 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
2516 if (vmx->rmode.vm86_active) {
2518 if (kvm_exception_is_soft(nr))
2519 inc_eip = vcpu->arch.event_exit_inst_len;
2520 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
2521 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2525 if (kvm_exception_is_soft(nr)) {
2526 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2527 vmx->vcpu.arch.event_exit_inst_len);
2528 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
2530 intr_info |= INTR_TYPE_HARD_EXCEPTION;
2532 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
2535 static bool vmx_rdtscp_supported(void)
2537 return cpu_has_vmx_rdtscp();
2540 static bool vmx_invpcid_supported(void)
2542 return cpu_has_vmx_invpcid() && enable_ept;
2546 * Swap MSR entry in host/guest MSR entry array.
2548 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
2550 struct shared_msr_entry tmp;
2552 tmp = vmx->guest_msrs[to];
2553 vmx->guest_msrs[to] = vmx->guest_msrs[from];
2554 vmx->guest_msrs[from] = tmp;
2557 static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
2559 unsigned long *msr_bitmap;
2561 if (is_guest_mode(vcpu))
2562 msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
2563 else if (cpu_has_secondary_exec_ctrls() &&
2564 (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
2565 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
2566 if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) {
2567 if (is_long_mode(vcpu))
2568 msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv;
2570 msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv;
2572 if (is_long_mode(vcpu))
2573 msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
2575 msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
2578 if (is_long_mode(vcpu))
2579 msr_bitmap = vmx_msr_bitmap_longmode;
2581 msr_bitmap = vmx_msr_bitmap_legacy;
2584 vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
2588 * Set up the vmcs to automatically save and restore system
2589 * msrs. Don't touch the 64-bit msrs if the guest is in legacy
2590 * mode, as fiddling with msrs is very expensive.
2592 static void setup_msrs(struct vcpu_vmx *vmx)
2594 int save_nmsrs, index;
2597 #ifdef CONFIG_X86_64
2598 if (is_long_mode(&vmx->vcpu)) {
2599 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
2601 move_msr_up(vmx, index, save_nmsrs++);
2602 index = __find_msr_index(vmx, MSR_LSTAR);
2604 move_msr_up(vmx, index, save_nmsrs++);
2605 index = __find_msr_index(vmx, MSR_CSTAR);
2607 move_msr_up(vmx, index, save_nmsrs++);
2608 index = __find_msr_index(vmx, MSR_TSC_AUX);
2609 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
2610 move_msr_up(vmx, index, save_nmsrs++);
2612 * MSR_STAR is only needed on long mode guests, and only
2613 * if efer.sce is enabled.
2615 index = __find_msr_index(vmx, MSR_STAR);
2616 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
2617 move_msr_up(vmx, index, save_nmsrs++);
2620 index = __find_msr_index(vmx, MSR_EFER);
2621 if (index >= 0 && update_transition_efer(vmx, index))
2622 move_msr_up(vmx, index, save_nmsrs++);
2624 vmx->save_nmsrs = save_nmsrs;
2626 if (cpu_has_vmx_msr_bitmap())
2627 vmx_set_msr_bitmap(&vmx->vcpu);
2631 * reads and returns guest's timestamp counter "register"
2632 * guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
2633 * -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
2635 static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
2637 u64 host_tsc, tsc_offset;
2640 tsc_offset = vmcs_read64(TSC_OFFSET);
2641 return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
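/*
 * Illustrative sketch (not driver code): with TSC scaling the guest TSC is
 * derived from the host TSC by a wide multiply with the scaling ratio (a
 * fixed-point value with 48 fractional bits on VMX) followed by adding
 * TSC_OFFSET, matching the formula quoted above.  Standalone helper that
 * assumes a compiler with the GCC/Clang __int128 extension.
 */
#include <stdint.h>

#define SKETCH_TSC_RATIO_FRAC_BITS 48

static uint64_t sketch_guest_tsc(uint64_t host_tsc, uint64_t ratio,
				 uint64_t tsc_offset)
{
	unsigned __int128 scaled = (unsigned __int128)host_tsc * ratio;

	return (uint64_t)(scaled >> SKETCH_TSC_RATIO_FRAC_BITS) + tsc_offset;
}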
2645 * writes 'offset' into guest's timestamp counter offset register
2647 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
2649 if (is_guest_mode(vcpu)) {
2651 * We're here if L1 chose not to trap WRMSR to TSC. According
2652 * to the spec, this should set L1's TSC; the offset that L1
2653 * set for L2 remains unchanged, and still needs to be added
2654 * to the newly set TSC to get L2's TSC.
2656 struct vmcs12 *vmcs12;
2657 /* recalculate vmcs02.TSC_OFFSET: */
2658 vmcs12 = get_vmcs12(vcpu);
2659 vmcs_write64(TSC_OFFSET, offset +
2660 (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
2661 vmcs12->tsc_offset : 0));
2663 trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2664 vmcs_read64(TSC_OFFSET), offset);
2665 vmcs_write64(TSC_OFFSET, offset);
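/*
 * Illustrative sketch (not driver code): while L2 runs, the offset written
 * to the active VMCS is the L0-computed offset for L1 plus whatever offset
 * L1 programmed for L2 (only if L1 enabled CPU_BASED_USE_TSC_OFFSETING),
 * mirroring the recalculation done above.  Standalone helper with assumed
 * names.
 */
#include <stdint.h>
#include <stdbool.h>

static uint64_t sketch_vmcs02_tsc_offset(uint64_t l1_offset,
					 uint64_t vmcs12_offset,
					 bool l1_uses_tsc_offsetting)
{
	return l1_offset + (l1_uses_tsc_offsetting ? vmcs12_offset : 0);
}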
2670 * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
2671 * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
2672 * all guests if the "nested" module option is off, and can also be disabled
2673 * for a single guest by disabling its VMX cpuid bit.
2675 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2677 return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
2681 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
2682 * returned for the various VMX controls MSRs when nested VMX is enabled.
2683 * The same values should also be used to verify that vmcs12 control fields are
2684 * valid during nested entry from L1 to L2.
2685 * Each of these control msrs has a low and high 32-bit half: A low bit is on
2686 * if the corresponding bit in the (32-bit) control field *must* be on, and a
2687 * bit in the high half is on if the corresponding bit in the control field
2688 * may be on. See also vmx_control_verify().
2690 static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2693 * Note that as a general rule, the high half of the MSRs (bits in
2694 * the control fields which may be 1) should be initialized by the
2695 * intersection of the underlying hardware's MSR (i.e., features which
2696 * can be supported) and the list of features we want to expose -
2697 * because they are known to be properly supported in our code.
2698 * Also, usually, the low half of the MSRs (bits which must be 1) can
2699 * be set to 0, meaning that L1 may turn off any of these bits. The
2700 * reason is that if one of these bits is necessary, it will appear
2701 * in vmcs01; and prepare_vmcs02, which bitwise-ORs the control
2702 * fields of vmcs01 and vmcs12, will keep these bits set - and
2703 * nested_vmx_exit_reflected() will not pass related exits to L1.
2704 * These rules have exceptions below.
2707 /* pin-based controls */
2708 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
2709 vmx->nested.nested_vmx_pinbased_ctls_low,
2710 vmx->nested.nested_vmx_pinbased_ctls_high);
2711 vmx->nested.nested_vmx_pinbased_ctls_low |=
2712 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2713 vmx->nested.nested_vmx_pinbased_ctls_high &=
2714 PIN_BASED_EXT_INTR_MASK |
2715 PIN_BASED_NMI_EXITING |
2716 PIN_BASED_VIRTUAL_NMIS;
2717 vmx->nested.nested_vmx_pinbased_ctls_high |=
2718 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2719 PIN_BASED_VMX_PREEMPTION_TIMER;
2720 if (kvm_vcpu_apicv_active(&vmx->vcpu))
2721 vmx->nested.nested_vmx_pinbased_ctls_high |=
2722 PIN_BASED_POSTED_INTR;
2725 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
2726 vmx->nested.nested_vmx_exit_ctls_low,
2727 vmx->nested.nested_vmx_exit_ctls_high);
2728 vmx->nested.nested_vmx_exit_ctls_low =
2729 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
2731 vmx->nested.nested_vmx_exit_ctls_high &=
2732 #ifdef CONFIG_X86_64
2733 VM_EXIT_HOST_ADDR_SPACE_SIZE |
2735 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
2736 vmx->nested.nested_vmx_exit_ctls_high |=
2737 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
2738 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
2739 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
2741 if (kvm_mpx_supported())
2742 vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
2744 /* We support free control of debug control saving. */
2745 vmx->nested.nested_vmx_exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
2747 /* entry controls */
2748 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
2749 vmx->nested.nested_vmx_entry_ctls_low,
2750 vmx->nested.nested_vmx_entry_ctls_high);
2751 vmx->nested.nested_vmx_entry_ctls_low =
2752 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
2753 vmx->nested.nested_vmx_entry_ctls_high &=
2754 #ifdef CONFIG_X86_64
2755 VM_ENTRY_IA32E_MODE |
2757 VM_ENTRY_LOAD_IA32_PAT;
2758 vmx->nested.nested_vmx_entry_ctls_high |=
2759 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
2760 if (kvm_mpx_supported())
2761 vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
2763 /* We support free control of debug control loading. */
2764 vmx->nested.nested_vmx_entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
2766 /* cpu-based controls */
2767 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
2768 vmx->nested.nested_vmx_procbased_ctls_low,
2769 vmx->nested.nested_vmx_procbased_ctls_high);
2770 vmx->nested.nested_vmx_procbased_ctls_low =
2771 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
2772 vmx->nested.nested_vmx_procbased_ctls_high &=
2773 CPU_BASED_VIRTUAL_INTR_PENDING |
2774 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
2775 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
2776 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
2777 CPU_BASED_CR3_STORE_EXITING |
2778 #ifdef CONFIG_X86_64
2779 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
2781 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
2782 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
2783 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
2784 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
2785 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2787 * We can allow some features even when not supported by the
2788 * hardware. For example, L1 can specify an MSR bitmap - and we
2789 * can use it to avoid exits to L1 - even when L0 runs L2
2790 * without MSR bitmaps.
2792 vmx->nested.nested_vmx_procbased_ctls_high |=
2793 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
2794 CPU_BASED_USE_MSR_BITMAPS;
2796 /* We support free control of CR3 access interception. */
2797 vmx->nested.nested_vmx_procbased_ctls_low &=
2798 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
2801 * secondary cpu-based controls. Do not include those that
2802 * depend on CPUID bits; they are added later by vmx_cpuid_update.
2804 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
2805 vmx->nested.nested_vmx_secondary_ctls_low,
2806 vmx->nested.nested_vmx_secondary_ctls_high);
2807 vmx->nested.nested_vmx_secondary_ctls_low = 0;
2808 vmx->nested.nested_vmx_secondary_ctls_high &=
2809 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2810 SECONDARY_EXEC_DESC |
2811 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2812 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2813 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2814 SECONDARY_EXEC_WBINVD_EXITING;
2817 /* nested EPT: emulate EPT also to L1 */
2818 vmx->nested.nested_vmx_secondary_ctls_high |=
2819 SECONDARY_EXEC_ENABLE_EPT;
2820 vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2821 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
2822 if (cpu_has_vmx_ept_execute_only())
2823 vmx->nested.nested_vmx_ept_caps |=
2824 VMX_EPT_EXECUTE_ONLY_BIT;
2825 vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
2826 vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
2827 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
2828 VMX_EPT_1GB_PAGE_BIT;
2829 if (enable_ept_ad_bits) {
2830 vmx->nested.nested_vmx_secondary_ctls_high |=
2831 SECONDARY_EXEC_ENABLE_PML;
2832 vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
2835 vmx->nested.nested_vmx_ept_caps = 0;
2837 if (cpu_has_vmx_vmfunc()) {
2838 vmx->nested.nested_vmx_secondary_ctls_high |=
2839 SECONDARY_EXEC_ENABLE_VMFUNC;
2841 * Advertise EPTP switching unconditionally
2842 * since we emulate it
2844 vmx->nested.nested_vmx_vmfunc_controls =
2845 VMX_VMFUNC_EPTP_SWITCHING;
2849 * Old versions of KVM use the single-context version without
2850 * checking for support, so declare that it is supported even
2851 * though it is treated as global context. The alternative is
2852 * not failing the single-context invvpid, and it is worse.
2855 vmx->nested.nested_vmx_secondary_ctls_high |=
2856 SECONDARY_EXEC_ENABLE_VPID;
2857 vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
2858 VMX_VPID_EXTENT_SUPPORTED_MASK;
2860 vmx->nested.nested_vmx_vpid_caps = 0;
2862 if (enable_unrestricted_guest)
2863 vmx->nested.nested_vmx_secondary_ctls_high |=
2864 SECONDARY_EXEC_UNRESTRICTED_GUEST;
2866 /* miscellaneous data */
2867 rdmsr(MSR_IA32_VMX_MISC,
2868 vmx->nested.nested_vmx_misc_low,
2869 vmx->nested.nested_vmx_misc_high);
2870 vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
2871 vmx->nested.nested_vmx_misc_low |=
2872 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
2873 VMX_MISC_ACTIVITY_HLT;
2874 vmx->nested.nested_vmx_misc_high = 0;
2877 * This MSR reports some information about VMX support. We
2878 * should return information about the VMX we emulate for the
2879 * guest, and the VMCS structure we give it - not about the
2880 * VMX support of the underlying hardware.
2882 vmx->nested.nested_vmx_basic =
2884 VMX_BASIC_TRUE_CTLS |
2885 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
2886 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
2888 if (cpu_has_vmx_basic_inout())
2889 vmx->nested.nested_vmx_basic |= VMX_BASIC_INOUT;
2892 * These MSRs specify bits which the guest must keep fixed on
2893 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
2894 * We picked the standard core2 setting.
2896 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
2897 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE
2898 vmx->nested.nested_vmx_cr0_fixed0 = VMXON_CR0_ALWAYSON;
2899 vmx->nested.nested_vmx_cr4_fixed0 = VMXON_CR4_ALWAYSON;
2901 /* These MSRs specify bits which the guest must keep fixed off. */
2902 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx->nested.nested_vmx_cr0_fixed1);
2903 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1);
2905 /* highest index: VMX_PREEMPTION_TIMER_VALUE */
2906 vmx->nested.nested_vmx_vmcs_enum = 0x2e;
2910 * if fixed0[i] == 1: val[i] must be 1
2911 * if fixed1[i] == 0: val[i] must be 0
2913 static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
2915 return ((val & fixed1) | fixed0) == val;
2918 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
2920 return fixed_bits_valid(control, low, high);
2923 static inline u64 vmx_control_msr(u32 low, u32 high)
2925 return low | ((u64)high << 32);
2928 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
2933 return (superset | subset) == superset;
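/*
 * Illustrative sketch (not driver code): a VMX control capability MSR packs
 * the "allowed-0" settings (bits that must be 1) in its low 32 bits and the
 * "allowed-1" settings (bits that may be 1) in its high 32 bits.  A 32-bit
 * control field is acceptable exactly when every must-be-1 bit is set and
 * no bit outside the may-be-1 mask is set -- equivalent to the check
 * fixed_bits_valid() performs, since must-be-1 bits are always also
 * allowed-1 bits.  Standalone helper with assumed names.
 */
#include <stdint.h>
#include <stdbool.h>

static bool sketch_control_ok(uint32_t control, uint64_t ctl_msr)
{
	uint32_t must_be_1 = (uint32_t)ctl_msr;		/* low half  */
	uint32_t may_be_1  = (uint32_t)(ctl_msr >> 32);	/* high half */

	return (control & must_be_1) == must_be_1 &&
	       (control & ~may_be_1) == 0;
}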
2936 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
2938 const u64 feature_and_reserved =
2939 /* feature (except bit 48; see below) */
2940 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
2942 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
2943 u64 vmx_basic = vmx->nested.nested_vmx_basic;
2945 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
2949 * KVM does not emulate a version of VMX that constrains physical
2950 * addresses of VMX structures (e.g. VMCS) to 32-bits.
2952 if (data & BIT_ULL(48))
2955 if (vmx_basic_vmcs_revision_id(vmx_basic) !=
2956 vmx_basic_vmcs_revision_id(data))
2959 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
2962 vmx->nested.nested_vmx_basic = data;
2967 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
2972 switch (msr_index) {
2973 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2974 lowp = &vmx->nested.nested_vmx_pinbased_ctls_low;
2975 highp = &vmx->nested.nested_vmx_pinbased_ctls_high;
2977 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2978 lowp = &vmx->nested.nested_vmx_procbased_ctls_low;
2979 highp = &vmx->nested.nested_vmx_procbased_ctls_high;
2981 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2982 lowp = &vmx->nested.nested_vmx_exit_ctls_low;
2983 highp = &vmx->nested.nested_vmx_exit_ctls_high;
2985 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2986 lowp = &vmx->nested.nested_vmx_entry_ctls_low;
2987 highp = &vmx->nested.nested_vmx_entry_ctls_high;
2989 case MSR_IA32_VMX_PROCBASED_CTLS2:
2990 lowp = &vmx->nested.nested_vmx_secondary_ctls_low;
2991 highp = &vmx->nested.nested_vmx_secondary_ctls_high;
2997 supported = vmx_control_msr(*lowp, *highp);
2999 /* Check must-be-1 bits are still 1. */
3000 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
3003 /* Check must-be-0 bits are still 0. */
3004 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
3008 *highp = data >> 32;
3012 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
3014 const u64 feature_and_reserved_bits =
3016 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
3017 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
3019 GENMASK_ULL(13, 9) | BIT_ULL(31);
3022 vmx_misc = vmx_control_msr(vmx->nested.nested_vmx_misc_low,
3023 vmx->nested.nested_vmx_misc_high);
3025 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
3028 if ((vmx->nested.nested_vmx_pinbased_ctls_high &
3029 PIN_BASED_VMX_PREEMPTION_TIMER) &&
3030 vmx_misc_preemption_timer_rate(data) !=
3031 vmx_misc_preemption_timer_rate(vmx_misc))
3034 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
3037 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
3040 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
3043 vmx->nested.nested_vmx_misc_low = data;
3044 vmx->nested.nested_vmx_misc_high = data >> 32;
3048 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
3050 u64 vmx_ept_vpid_cap;
3052 vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.nested_vmx_ept_caps,
3053 vmx->nested.nested_vmx_vpid_caps);
3055 /* Every bit is either reserved or a feature bit. */
3056 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
3059 vmx->nested.nested_vmx_ept_caps = data;
3060 vmx->nested.nested_vmx_vpid_caps = data >> 32;
3064 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
3068 switch (msr_index) {
3069 case MSR_IA32_VMX_CR0_FIXED0:
3070 msr = &vmx->nested.nested_vmx_cr0_fixed0;
3072 case MSR_IA32_VMX_CR4_FIXED0:
3073 msr = &vmx->nested.nested_vmx_cr4_fixed0;
3080 * 1 bits (which indicate bits that "must-be-1" during VMX operation)
3081 * must be 1 in the restored value.
3083 if (!is_bitwise_subset(data, *msr, -1ULL))
3091 * Called when userspace is restoring VMX MSRs.
3093 * Returns 0 on success, non-0 otherwise.
3095 static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
3097 struct vcpu_vmx *vmx = to_vmx(vcpu);
3099 switch (msr_index) {
3100 case MSR_IA32_VMX_BASIC:
3101 return vmx_restore_vmx_basic(vmx, data);
3102 case MSR_IA32_VMX_PINBASED_CTLS:
3103 case MSR_IA32_VMX_PROCBASED_CTLS:
3104 case MSR_IA32_VMX_EXIT_CTLS:
3105 case MSR_IA32_VMX_ENTRY_CTLS:
3107 * The "non-true" VMX capability MSRs are generated from the
3108 * "true" MSRs, so we do not support restoring them directly.
3110 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
3111 * should restore the "true" MSRs with the must-be-1 bits
3112 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
3113 * DEFAULT SETTINGS".
3116 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3117 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3118 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3119 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3120 case MSR_IA32_VMX_PROCBASED_CTLS2:
3121 return vmx_restore_control_msr(vmx, msr_index, data);
3122 case MSR_IA32_VMX_MISC:
3123 return vmx_restore_vmx_misc(vmx, data);
3124 case MSR_IA32_VMX_CR0_FIXED0:
3125 case MSR_IA32_VMX_CR4_FIXED0:
3126 return vmx_restore_fixed0_msr(vmx, msr_index, data);
3127 case MSR_IA32_VMX_CR0_FIXED1:
3128 case MSR_IA32_VMX_CR4_FIXED1:
3130 * These MSRs are generated based on the vCPU's CPUID, so we
3131 * do not support restoring them directly.
3134 case MSR_IA32_VMX_EPT_VPID_CAP:
3135 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
3136 case MSR_IA32_VMX_VMCS_ENUM:
3137 vmx->nested.nested_vmx_vmcs_enum = data;
3141 * The rest of the VMX capability MSRs do not support restore.
3147 /* Returns 0 on success, non-0 otherwise. */
3148 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
3150 struct vcpu_vmx *vmx = to_vmx(vcpu);
3152 switch (msr_index) {
3153 case MSR_IA32_VMX_BASIC:
3154 *pdata = vmx->nested.nested_vmx_basic;
3156 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
3157 case MSR_IA32_VMX_PINBASED_CTLS:
3158 *pdata = vmx_control_msr(
3159 vmx->nested.nested_vmx_pinbased_ctls_low,
3160 vmx->nested.nested_vmx_pinbased_ctls_high);
3161 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
3162 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3164 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
3165 case MSR_IA32_VMX_PROCBASED_CTLS:
3166 *pdata = vmx_control_msr(
3167 vmx->nested.nested_vmx_procbased_ctls_low,
3168 vmx->nested.nested_vmx_procbased_ctls_high);
3169 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
3170 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
3172 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
3173 case MSR_IA32_VMX_EXIT_CTLS:
3174 *pdata = vmx_control_msr(
3175 vmx->nested.nested_vmx_exit_ctls_low,
3176 vmx->nested.nested_vmx_exit_ctls_high);
3177 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
3178 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
3180 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
3181 case MSR_IA32_VMX_ENTRY_CTLS:
3182 *pdata = vmx_control_msr(
3183 vmx->nested.nested_vmx_entry_ctls_low,
3184 vmx->nested.nested_vmx_entry_ctls_high);
3185 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
3186 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
3188 case MSR_IA32_VMX_MISC:
3189 *pdata = vmx_control_msr(
3190 vmx->nested.nested_vmx_misc_low,
3191 vmx->nested.nested_vmx_misc_high);
3193 case MSR_IA32_VMX_CR0_FIXED0:
3194 *pdata = vmx->nested.nested_vmx_cr0_fixed0;
3196 case MSR_IA32_VMX_CR0_FIXED1:
3197 *pdata = vmx->nested.nested_vmx_cr0_fixed1;
3199 case MSR_IA32_VMX_CR4_FIXED0:
3200 *pdata = vmx->nested.nested_vmx_cr4_fixed0;
3202 case MSR_IA32_VMX_CR4_FIXED1:
3203 *pdata = vmx->nested.nested_vmx_cr4_fixed1;
3205 case MSR_IA32_VMX_VMCS_ENUM:
3206 *pdata = vmx->nested.nested_vmx_vmcs_enum;
3208 case MSR_IA32_VMX_PROCBASED_CTLS2:
3209 *pdata = vmx_control_msr(
3210 vmx->nested.nested_vmx_secondary_ctls_low,
3211 vmx->nested.nested_vmx_secondary_ctls_high);
3213 case MSR_IA32_VMX_EPT_VPID_CAP:
3214 *pdata = vmx->nested.nested_vmx_ept_caps |
3215 ((u64)vmx->nested.nested_vmx_vpid_caps << 32);
3217 case MSR_IA32_VMX_VMFUNC:
3218 *pdata = vmx->nested.nested_vmx_vmfunc_controls;
3227 static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
3230 uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
3232 return !(val & ~valid_bits);
3236 * Reads an MSR value (msr_info->index) into msr_info->data.
3237 * Returns 0 on success, non-0 otherwise.
3238 * Assumes vcpu_load() was already called.
3240 static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3242 struct shared_msr_entry *msr;
3244 switch (msr_info->index) {
3245 #ifdef CONFIG_X86_64
3247 msr_info->data = vmcs_readl(GUEST_FS_BASE);
3250 msr_info->data = vmcs_readl(GUEST_GS_BASE);
3252 case MSR_KERNEL_GS_BASE:
3253 vmx_load_host_state(to_vmx(vcpu));
3254 msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
3258 return kvm_get_msr_common(vcpu, msr_info);
3260 msr_info->data = guest_read_tsc(vcpu);
3262 case MSR_IA32_SYSENTER_CS:
3263 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
3265 case MSR_IA32_SYSENTER_EIP:
3266 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
3268 case MSR_IA32_SYSENTER_ESP:
3269 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
3271 case MSR_IA32_BNDCFGS:
3272 if (!kvm_mpx_supported() ||
3273 (!msr_info->host_initiated &&
3274 !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3276 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
3278 case MSR_IA32_MCG_EXT_CTL:
3279 if (!msr_info->host_initiated &&
3280 !(to_vmx(vcpu)->msr_ia32_feature_control &
3281 FEATURE_CONTROL_LMCE))
3283 msr_info->data = vcpu->arch.mcg_ext_ctl;
3285 case MSR_IA32_FEATURE_CONTROL:
3286 msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
3288 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3289 if (!nested_vmx_allowed(vcpu))
3291 return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
3293 if (!vmx_xsaves_supported())
3295 msr_info->data = vcpu->arch.ia32_xss;
3298 if (!msr_info->host_initiated &&
3299 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
3301 /* Otherwise falls through */
3303 msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
3305 msr_info->data = msr->data;
3308 return kvm_get_msr_common(vcpu, msr_info);
3314 static void vmx_leave_nested(struct kvm_vcpu *vcpu);
3317 * Writes msr value into the appropriate "register".
3318 * Returns 0 on success, non-0 otherwise.
3319 * Assumes vcpu_load() was already called.
3321 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3323 struct vcpu_vmx *vmx = to_vmx(vcpu);
3324 struct shared_msr_entry *msr;
3326 u32 msr_index = msr_info->index;
3327 u64 data = msr_info->data;
3329 switch (msr_index) {
3331 ret = kvm_set_msr_common(vcpu, msr_info);
3333 #ifdef CONFIG_X86_64
3335 vmx_segment_cache_clear(vmx);
3336 vmcs_writel(GUEST_FS_BASE, data);
3339 vmx_segment_cache_clear(vmx);
3340 vmcs_writel(GUEST_GS_BASE, data);
3342 case MSR_KERNEL_GS_BASE:
3343 vmx_load_host_state(vmx);
3344 vmx->msr_guest_kernel_gs_base = data;
3347 case MSR_IA32_SYSENTER_CS:
3348 vmcs_write32(GUEST_SYSENTER_CS, data);
3350 case MSR_IA32_SYSENTER_EIP:
3351 vmcs_writel(GUEST_SYSENTER_EIP, data);
3353 case MSR_IA32_SYSENTER_ESP:
3354 vmcs_writel(GUEST_SYSENTER_ESP, data);
3356 case MSR_IA32_BNDCFGS:
3357 if (!kvm_mpx_supported() ||
3358 (!msr_info->host_initiated &&
3359 !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
3361 if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
3362 (data & MSR_IA32_BNDCFGS_RSVD))
3364 vmcs_write64(GUEST_BNDCFGS, data);
3367 kvm_write_tsc(vcpu, msr_info);
3369 case MSR_IA32_CR_PAT:
3370 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
3371 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3373 vmcs_write64(GUEST_IA32_PAT, data);
3374 vcpu->arch.pat = data;
3377 ret = kvm_set_msr_common(vcpu, msr_info);
3379 case MSR_IA32_TSC_ADJUST:
3380 ret = kvm_set_msr_common(vcpu, msr_info);
3382 case MSR_IA32_MCG_EXT_CTL:
3383 if ((!msr_info->host_initiated &&
3384 !(to_vmx(vcpu)->msr_ia32_feature_control &
3385 FEATURE_CONTROL_LMCE)) ||
3386 (data & ~MCG_EXT_CTL_LMCE_EN))
3388 vcpu->arch.mcg_ext_ctl = data;
3390 case MSR_IA32_FEATURE_CONTROL:
3391 if (!vmx_feature_control_msr_valid(vcpu, data) ||
3392 (to_vmx(vcpu)->msr_ia32_feature_control &
3393 FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
3395 vmx->msr_ia32_feature_control = data;
3396 if (msr_info->host_initiated && data == 0)
3397 vmx_leave_nested(vcpu);
3399 case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
3400 if (!msr_info->host_initiated)
3401 return 1; /* they are read-only */
3402 if (!nested_vmx_allowed(vcpu))
3404 return vmx_set_vmx_msr(vcpu, msr_index, data);
3406 if (!vmx_xsaves_supported())
3409 * The only supported bit as of Skylake is bit 8, but
3410 * it is not supported on KVM.
3414 vcpu->arch.ia32_xss = data;
3415 if (vcpu->arch.ia32_xss != host_xss)
3416 add_atomic_switch_msr(vmx, MSR_IA32_XSS,
3417 vcpu->arch.ia32_xss, host_xss);
3419 clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
3422 if (!msr_info->host_initiated &&
3423 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
3425 /* Check reserved bit, higher 32 bits should be zero */
3426 if ((data >> 32) != 0)
3428 /* Otherwise falls through */
3430 msr = find_msr_entry(vmx, msr_index);
3432 u64 old_msr_data = msr->data;
3434 if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
3436 ret = kvm_set_shared_msr(msr->index, msr->data,
3440 msr->data = old_msr_data;
3444 ret = kvm_set_msr_common(vcpu, msr_info);
3450 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
3452 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
3455 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
3458 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
3460 case VCPU_EXREG_PDPTR:
3462 ept_save_pdptrs(vcpu);
3469 static __init int cpu_has_kvm_support(void)
3471 return cpu_has_vmx();
3474 static __init int vmx_disabled_by_bios(void)
3478 rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
3479 if (msr & FEATURE_CONTROL_LOCKED) {
3480 /* launched w/ TXT and VMX disabled */
3481 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
3484 /* launched w/o TXT and VMX only enabled w/ TXT */
3485 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
3486 && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
3487 && !tboot_enabled()) {
3488 printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
3489 "activate TXT before enabling KVM\n");
3492 /* launched w/o TXT and VMX disabled */
3493 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
3494 && !tboot_enabled())
3501 static void kvm_cpu_vmxon(u64 addr)
3503 cr4_set_bits(X86_CR4_VMXE);
3504 intel_pt_handle_vmx(1);
3506 asm volatile (ASM_VMX_VMXON_RAX
3507 : : "a"(&addr), "m"(addr)
3511 static int hardware_enable(void)
3513 int cpu = raw_smp_processor_id();
3514 u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
3517 if (cr4_read_shadow() & X86_CR4_VMXE)
3520 INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
3521 INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
3522 spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
3525 * Now we can enable the vmclear operation in kdump
3526 * since the loaded_vmcss_on_cpu list on this cpu
3527 * has been initialized.
3529 * Though the cpu is not in VMX operation now, there
3530 * is no problem in enabling the vmclear operation, because
3531 * the loaded_vmcss_on_cpu list is still empty.
3533 crash_enable_local_vmclear(cpu);
3535 rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
3537 test_bits = FEATURE_CONTROL_LOCKED;
3538 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
3539 if (tboot_enabled())
3540 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
3542 if ((old & test_bits) != test_bits) {
3543 /* enable and lock */
3544 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
3546 kvm_cpu_vmxon(phys_addr);
3552 static void vmclear_local_loaded_vmcss(void)
3554 int cpu = raw_smp_processor_id();
3555 struct loaded_vmcs *v, *n;
3557 list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
3558 loaded_vmcss_on_cpu_link)
3559 __loaded_vmcs_clear(v);
3563 /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot() tricks. */
3566 static void kvm_cpu_vmxoff(void)
3568 asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
3570 intel_pt_handle_vmx(0);
3571 cr4_clear_bits(X86_CR4_VMXE);
3574 static void hardware_disable(void)
3576 vmclear_local_loaded_vmcss();
3580 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
3581 u32 msr, u32 *result)
3583 u32 vmx_msr_low, vmx_msr_high;
3584 u32 ctl = ctl_min | ctl_opt;
3586 rdmsr(msr, vmx_msr_low, vmx_msr_high);
3588 ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
3589 ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
3591 /* Ensure minimum (required) set of control bits are supported. */
3599 static __init bool allow_1_setting(u32 msr, u32 ctl)
3601 u32 vmx_msr_low, vmx_msr_high;
3603 rdmsr(msr, vmx_msr_low, vmx_msr_high);
3604 return vmx_msr_high & ctl;
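/*
 * Illustrative sketch (not driver code): adjust_vmx_controls() above folds
 * the desired (min | opt) control bits against the capability MSR -- the
 * high word clears bits the CPU cannot set to 1, the low word forces bits
 * the CPU requires to be 1 -- and fails only if a required bit was lost.
 * Standalone helper taking the raw 64-bit capability value, with assumed
 * names.
 */
#include <stdint.h>

static int sketch_adjust_controls(uint32_t min, uint32_t opt,
				  uint64_t cap_msr, uint32_t *result)
{
	uint32_t low  = (uint32_t)cap_msr;		/* must-be-1 bits */
	uint32_t high = (uint32_t)(cap_msr >> 32);	/* may-be-1 bits  */
	uint32_t ctl  = min | opt;

	ctl &= high;	/* bit == 0 in high word ==> must be zero */
	ctl |= low;	/* bit == 1 in low word  ==> must be one  */

	if ((ctl & min) != min)		/* a required feature is missing */
		return -1;
	*result = ctl;
	return 0;
}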
3607 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
3609 u32 vmx_msr_low, vmx_msr_high;
3610 u32 min, opt, min2, opt2;
3611 u32 _pin_based_exec_control = 0;
3612 u32 _cpu_based_exec_control = 0;
3613 u32 _cpu_based_2nd_exec_control = 0;
3614 u32 _vmexit_control = 0;
3615 u32 _vmentry_control = 0;
3617 min = CPU_BASED_HLT_EXITING |
3618 #ifdef CONFIG_X86_64
3619 CPU_BASED_CR8_LOAD_EXITING |
3620 CPU_BASED_CR8_STORE_EXITING |
3622 CPU_BASED_CR3_LOAD_EXITING |
3623 CPU_BASED_CR3_STORE_EXITING |
3624 CPU_BASED_USE_IO_BITMAPS |
3625 CPU_BASED_MOV_DR_EXITING |
3626 CPU_BASED_USE_TSC_OFFSETING |
3627 CPU_BASED_INVLPG_EXITING |
3628 CPU_BASED_RDPMC_EXITING;
3630 if (!kvm_mwait_in_guest())
3631 min |= CPU_BASED_MWAIT_EXITING |
3632 CPU_BASED_MONITOR_EXITING;
3634 opt = CPU_BASED_TPR_SHADOW |
3635 CPU_BASED_USE_MSR_BITMAPS |
3636 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
3637 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
3638 &_cpu_based_exec_control) < 0)
3640 #ifdef CONFIG_X86_64
3641 if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
3642 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
3643 ~CPU_BASED_CR8_STORE_EXITING;
3645 if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
3647 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3648 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3649 SECONDARY_EXEC_WBINVD_EXITING |
3650 SECONDARY_EXEC_ENABLE_VPID |
3651 SECONDARY_EXEC_ENABLE_EPT |
3652 SECONDARY_EXEC_UNRESTRICTED_GUEST |
3653 SECONDARY_EXEC_PAUSE_LOOP_EXITING |
3654 SECONDARY_EXEC_RDTSCP |
3655 SECONDARY_EXEC_ENABLE_INVPCID |
3656 SECONDARY_EXEC_APIC_REGISTER_VIRT |
3657 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3658 SECONDARY_EXEC_SHADOW_VMCS |
3659 SECONDARY_EXEC_XSAVES |
3660 SECONDARY_EXEC_RDSEED |
3661 SECONDARY_EXEC_RDRAND |
3662 SECONDARY_EXEC_ENABLE_PML |
3663 SECONDARY_EXEC_TSC_SCALING |
3664 SECONDARY_EXEC_ENABLE_VMFUNC;
3665 if (adjust_vmx_controls(min2, opt2,
3666 MSR_IA32_VMX_PROCBASED_CTLS2,
3667 &_cpu_based_2nd_exec_control) < 0)
3670 #ifndef CONFIG_X86_64
3671 if (!(_cpu_based_2nd_exec_control &
3672 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
3673 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
3676 if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
3677 _cpu_based_2nd_exec_control &= ~(
3678 SECONDARY_EXEC_APIC_REGISTER_VIRT |
3679 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3680 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
3682 if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
3683 /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
3685 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
3686 CPU_BASED_CR3_STORE_EXITING |
3687 CPU_BASED_INVLPG_EXITING);
3688 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
3689 vmx_capability.ept, vmx_capability.vpid);
3692 min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
3693 #ifdef CONFIG_X86_64
3694 min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
3696 opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
3697 VM_EXIT_CLEAR_BNDCFGS;
3698 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
3699 &_vmexit_control) < 0)
3702 min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
3703 PIN_BASED_VIRTUAL_NMIS;
3704 opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER;
3705 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
3706 &_pin_based_exec_control) < 0)
3709 if (cpu_has_broken_vmx_preemption_timer())
3710 _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
3711 if (!(_cpu_based_2nd_exec_control &
3712 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
3713 _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
3715 min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
3716 opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
3717 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
3718 &_vmentry_control) < 0)
3721 rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
3723 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
3724 if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
3727 #ifdef CONFIG_X86_64
3728 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
3729 if (vmx_msr_high & (1u<<16))
3733 /* Require Write-Back (WB) memory type for VMCS accesses. */
3734 if (((vmx_msr_high >> 18) & 15) != 6)
3737 vmcs_conf->size = vmx_msr_high & 0x1fff;
3738 vmcs_conf->order = get_order(vmcs_conf->size);
3739 vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
3740 vmcs_conf->revision_id = vmx_msr_low;
3742 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
3743 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
3744 vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
3745 vmcs_conf->vmexit_ctrl = _vmexit_control;
3746 vmcs_conf->vmentry_ctrl = _vmentry_control;
3748 cpu_has_load_ia32_efer =
3749 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
3750 VM_ENTRY_LOAD_IA32_EFER)
3751 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
3752 VM_EXIT_LOAD_IA32_EFER);
3754 cpu_has_load_perf_global_ctrl =
3755 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
3756 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
3757 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
3758 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
3761 * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
3762 * but due to errata below it can't be used. Workaround is to use
3763 * msr load mechanism to switch IA32_PERF_GLOBAL_CTRL.
3765 * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
3770 * BC86,AAY89,BD102 (model 44)
3774 if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
3775 switch (boot_cpu_data.x86_model) {
3781 cpu_has_load_perf_global_ctrl = false;
3782 printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
3783 "does not work properly. Using workaround\n");
3790 if (boot_cpu_has(X86_FEATURE_XSAVES))
3791 rdmsrl(MSR_IA32_XSS, host_xss);
3796 static struct vmcs *alloc_vmcs_cpu(int cpu)
3798 int node = cpu_to_node(cpu);
3802 pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
3805 vmcs = page_address(pages);
3806 memset(vmcs, 0, vmcs_config.size);
3807 vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
3811 static struct vmcs *alloc_vmcs(void)
3813 return alloc_vmcs_cpu(raw_smp_processor_id());
3816 static void free_vmcs(struct vmcs *vmcs)
3818 free_pages((unsigned long)vmcs, vmcs_config.order);
3822 * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
3824 static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
3826 if (!loaded_vmcs->vmcs)
3828 loaded_vmcs_clear(loaded_vmcs);
3829 free_vmcs(loaded_vmcs->vmcs);
3830 loaded_vmcs->vmcs = NULL;
3831 WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
3834 static void free_kvm_area(void)
3838 for_each_possible_cpu(cpu) {
3839 free_vmcs(per_cpu(vmxarea, cpu));
3840 per_cpu(vmxarea, cpu) = NULL;
3844 enum vmcs_field_type {
3845 VMCS_FIELD_TYPE_U16 = 0,
3846 VMCS_FIELD_TYPE_U64 = 1,
3847 VMCS_FIELD_TYPE_U32 = 2,
3848 VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
3851 static inline int vmcs_field_type(unsigned long field)
3853 if (0x1 & field) /* the *_HIGH fields are all 32 bit */
3854 return VMCS_FIELD_TYPE_U32;
3855 return (field >> 13) & 0x3 ;
3858 static inline int vmcs_field_readonly(unsigned long field)
3860 return (((field >> 10) & 0x3) == 1);
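/*
 * Illustrative sketch (not driver code): a VMCS field encoding packs the
 * access type in bit 0 (1 selects the high half of a 64-bit field), the
 * field type in bits 11:10 (1 = read-only VM-exit information) and the
 * width in bits 14:13 -- which is what vmcs_field_type() and
 * vmcs_field_readonly() above extract.  Standalone decoder with assumed
 * names.
 */
#include <stdint.h>
#include <stdbool.h>

struct sketch_vmcs_field {
	bool high_access;	/* bit 0 set: *_HIGH half of a 64-bit field */
	unsigned type;		/* bits 11:10, 1 == read-only */
	unsigned width;		/* bits 14:13, 0=16b 1=64b 2=32b 3=natural */
};

static struct sketch_vmcs_field sketch_decode_field(unsigned long field)
{
	struct sketch_vmcs_field f = {
		.high_access = field & 0x1,
		.type        = (field >> 10) & 0x3,
		.width       = (field >> 13) & 0x3,
	};
	return f;
}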
3863 static void init_vmcs_shadow_fields(void)
3867 /* No checks for read only fields yet */
3869 for (i = j = 0; i < max_shadow_read_write_fields; i++) {
3870 switch (shadow_read_write_fields[i]) {
3872 if (!kvm_mpx_supported())
3880 shadow_read_write_fields[j] =
3881 shadow_read_write_fields[i];
3884 max_shadow_read_write_fields = j;
3886 /* shadowed fields: the guest accesses these without a vmexit */
3887 for (i = 0; i < max_shadow_read_write_fields; i++) {
3888 unsigned long field = shadow_read_write_fields[i];
3890 clear_bit(field, vmx_vmwrite_bitmap);
3891 clear_bit(field, vmx_vmread_bitmap);
3892 if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64) {
3893 clear_bit(field + 1, vmx_vmwrite_bitmap);
3894 clear_bit(field + 1, vmx_vmread_bitmap);
3897 for (i = 0; i < max_shadow_read_only_fields; i++) {
3898 unsigned long field = shadow_read_only_fields[i];
3900 clear_bit(field, vmx_vmread_bitmap);
3901 if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64)
3902 clear_bit(field + 1, vmx_vmread_bitmap);
3906 static __init int alloc_kvm_area(void)
3910 for_each_possible_cpu(cpu) {
3913 vmcs = alloc_vmcs_cpu(cpu);
3919 per_cpu(vmxarea, cpu) = vmcs;
3924 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
3925 struct kvm_segment *save)
3927 if (!emulate_invalid_guest_state) {
3929 * CS and SS RPL should be equal during guest entry according
3930 * to VMX spec, but in reality it is not always so. Since vcpu
3931 * is in the middle of the transition from real mode to
3932 * protected mode it is safe to assume that RPL 0 is a good default value.