// SPDX-License-Identifier: GPL-2.0

#include <linux/frame.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

extern const ulong vmx_early_consistency_check_return;
/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])
static u16 shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x) x,
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static u16 shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x) x,
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);
void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		u16 field = shadow_read_only_fields[i];

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1] != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
#ifdef CONFIG_X86_64
		/* The "high" halves of 64-bit fields have odd numbers. */
		if (field & 1)
			continue;
#endif
		if (j < i)
			shadow_read_only_fields[j] = field;
		j++;
	}
	max_shadow_read_only_fields = j;
	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		u16 field = shadow_read_write_fields[i];

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1] != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
#ifdef CONFIG_X86_64
		if (field & 1)
			continue;
#endif
		if (j < i)
			shadow_read_write_fields[j] = field;
		j++;
	}
	max_shadow_read_write_fields = j;
}
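
/*
 * Bitmap semantics reference (illustrative note, not part of the original
 * source): a set bit in vmx_vmread_bitmap/vmx_vmwrite_bitmap means the
 * corresponding VMREAD/VMWRITE by L1 causes a VM exit; a clear bit lets the
 * CPU satisfy the access directly from the shadow VMCS.  A hypothetical
 * query could look like:
 *
 *	static bool is_shadowed_for_read(u16 field)
 *	{
 *		return !test_bit(field, vmx_vmread_bitmap);
 *	}
 */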
/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}
static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}
static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
		return nested_vmx_failInvalid(vcpu);

	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed.
	 */
	return kvm_skip_emulated_instruction(vcpu);
}
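
/*
 * Summary of the SDM "Conventions" implemented above (illustrative, not
 * part of the original source):
 *
 *	VMsucceed:     CF = PF = AF = ZF = SF = OF = 0
 *	VMfailInvalid: CF = 1, all other arithmetic flags cleared
 *	VMfailValid:   ZF = 1, all other arithmetic flags cleared, and the
 *	               error number stored in the VM_INSTRUCTION_ERROR field
 *	               of the current VMCS
 */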
static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: handle VMX aborts more gracefully than resetting the guest. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}
static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);
}
static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.hv_evmcs)
		return;

	kunmap(vmx->nested.hv_evmcs_page);
	kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
	vmx->nested.hv_evmcs_vmptr = -1ull;
	vmx->nested.hv_evmcs_page = NULL;
	vmx->nested.hv_evmcs = NULL;
}
/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = -1ull;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	kfree(vmx->nested.cached_shadow_vmcs12);
	/* Unpin physical memory we referred to in the vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_dirty(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	if (vmx->nested.virtual_apic_page) {
		kvm_release_page_dirty(vmx->nested.virtual_apic_page);
		vmx->nested.virtual_apic_page = NULL;
	}
	if (vmx->nested.pi_desc_page) {
		kunmap(vmx->nested.pi_desc_page);
		kvm_release_page_dirty(vmx->nested.pi_desc_page);
		vmx->nested.pi_desc_page = NULL;
		vmx->nested.pi_desc = NULL;
	}

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;

	if (vmx->loaded_vmcs == vmcs)
		return;

	cpu = get_cpu();
	vmx_vcpu_put(vcpu);
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load(vcpu, cpu);
	put_cpu();

	vm_entry_controls_reset_shadow(vmx);
	vm_exit_controls_reset_shadow(vmx);
	vmx_segment_cache_clear(vmx);
}
/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
	free_nested(vcpu);
	vcpu_put(vcpu);
}
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exit_reason;
	unsigned long exit_qualification = vcpu->arch.exit_qualification;

	if (vmx->nested.pml_full) {
		exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;
		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
	} else if (fault->error_code & PFERR_RSVD_MASK)
		exit_reason = EXIT_REASON_EPT_MISCONFIG;
	else
		exit_reason = EXIT_REASON_EPT_VIOLATION;

	nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_ept_mmu(vcpu,
			to_vmx(vcpu)->nested.msrs.ept_caps &
			VMX_EPT_EXECUTE_ONLY_BIT,
			nested_ept_ad_enabled(vcpu),
			nested_ept_get_cr3(vcpu));
	vcpu->arch.mmu->set_cr3           = vmx_set_cr3;
	vcpu->arch.mmu->get_cr3           = nested_ept_get_cr3;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}
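
/*
 * Worked example (illustrative, not part of the original source): with
 * bit 14 (#PF) set in the exception bitmap (bit == 1),
 * page_fault_error_code_mask == 1 and page_fault_error_code_match == 1,
 * a fault with error_code == 0x3 yields inequality == 0, so 0 ^ 1 == 1
 * and the #PF is reflected to L1; error_code == 0x2 yields
 * inequality == 1, so 1 ^ 1 == 0 and the fault is injected into L2.
 */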
/*
 * KVM wants to inject page-faults which it got to the guest. This function
 * checks whether in a nested guest, we need to inject them to L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
			return 1;
		}
	} else if (vmcs12->exception_bitmap & (1u << nr)) {
		if (nr == DB_VECTOR) {
			if (!has_payload) {
				payload = vcpu->arch.dr6;
				payload &= ~(DR6_FIXED_1 | DR6_BT);
				payload ^= DR6_RTM;
			}
			*exit_qual = payload;
		} else
			*exit_qual = 0;
		return 1;
	}

	return 0;
}
static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
	    !to_vmx(vcpu)->nested.nested_run_pending) {
		vmcs12->vm_exit_intr_error_code = fault->error_code;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
				  fault->address);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}
static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}
static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
	    !page_address_valid(vcpu, vmcs12->io_bitmap_b))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
		return -EINVAL;

	return 0;
}
/*
 * Check if an MSR write is intercepted by the L01 MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}
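
/*
 * MSR bitmap layout reference (illustrative, per the SDM; not part of the
 * original source): the 4K page holds four 1024-byte bitmaps, one bit per
 * MSR:
 *
 *	0x000: read  low  MSRs 0x00000000 - 0x00001fff
 *	0x400: read  high MSRs 0xc0000000 - 0xc0001fff
 *	0x800: write low  MSRs 0x00000000 - 0x00001fff
 *	0xc00: write high MSRs 0xc0000000 - 0xc0001fff
 *
 * e.g. a write to MSR_IA32_SPEC_CTRL (0x48) is intercepted iff bit 0x48 of
 * the bitmap at offset 0x800 is set.
 */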
/*
 * If an MSR is allowed by L0, we should check whether it is allowed by L1.
 * The corresponding bit will be cleared unless both L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
						 unsigned long *msr_bitmap_nested,
						 u32 msr, int type)
{
	int f = sizeof(unsigned long);

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
			/* read-low */
			__clear_bit(msr, msr_bitmap_nested + 0x000 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
			/* write-low */
			__clear_bit(msr, msr_bitmap_nested + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
			/* read-high */
			__clear_bit(msr, msr_bitmap_nested + 0x400 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
			/* write-high */
			__clear_bit(msr, msr_bitmap_nested + 0xc00 / f);

	}
}
/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	int msr;
	struct page *page;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
	/*
	 * pred_cmd & spec_ctrl are trying to verify two things:
	 *
	 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. That L1 or L2s have actually used the MSR. This avoids
	 *    unnecessary merging of the bitmap if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    the MSR.
	 */
	bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
	bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !pred_cmd && !spec_ctrl)
		return false;

	page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
	if (is_error_page(page))
		return false;

	msr_bitmap_l1 = (unsigned long *)kmap(page);
	if (nested_cpu_has_apic_reg_virt(vmcs12)) {
		/*
		 * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
		 * just lets the processor take the value from the virtual-APIC page;
		 * take those 256 bits directly from the L1 bitmap.
		 */
		for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
			unsigned word = msr / BITS_PER_LONG;
			msr_bitmap_l0[word] = msr_bitmap_l1[word];
			msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
		}
	} else {
		for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
			unsigned word = msr / BITS_PER_LONG;
			msr_bitmap_l0[word] = ~0;
			msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
		}
	}

	nested_vmx_disable_intercept_for_msr(
		msr_bitmap_l1, msr_bitmap_l0,
		X2APIC_MSR(APIC_TASKPRI),
		MSR_TYPE_W);

	if (nested_cpu_has_vid(vmcs12)) {
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_EOI),
			MSR_TYPE_W);
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_SELF_IPI),
			MSR_TYPE_W);
	}

	if (spec_ctrl)
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			MSR_IA32_SPEC_CTRL,
			MSR_TYPE_R | MSR_TYPE_W);

	if (pred_cmd)
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			MSR_IA32_PRED_CMD,
			MSR_TYPE_W);

	kunmap(page);
	kvm_release_page_clean(page);

	return true;
}
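
/*
 * Net effect of the merge above (illustrative, not part of the original
 * source): for every MSR and access type, L2 effectively runs with
 *
 *	intercept_l02(msr) = intercept_l0(msr) || intercept_l12(msr)
 *
 * i.e. a bit in the vmcs02 bitmap is cleared (passthrough) only when both
 * L0 and L1 allow the direct access.
 */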
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct vmcs12 *shadow;
	struct page *page;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	shadow = get_shadow_vmcs12(vcpu);
	page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);

	memcpy(shadow, kmap(page), VMCS12_SIZE);

	kunmap(page);
	kvm_release_page_clean(page);
}
static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}
/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT.
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}
static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    !page_address_valid(vcpu, vmcs12->apic_access_addr))
		return -EINVAL;
	else
		return 0;
}
static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (nested_cpu_has_vid(vmcs12) &&
	    !nested_exit_on_intr(vcpu))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has been already checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	    (!nested_cpu_has_vid(vmcs12) ||
	     !nested_exit_intr_ack_set(vcpu) ||
	     (vmcs12->posted_intr_nv & 0xff00) ||
	     (vmcs12->posted_intr_desc_addr & 0x3f) ||
	     (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	int maxphyaddr;

	if (count == 0)
		return 0;
	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
		return -EINVAL;

	return 0;
}
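
/*
 * Worked example (illustrative, not part of the original source): each
 * struct vmx_msr_entry is 16 bytes.  With maxphyaddr == 36, the valid GPA
 * range ends at 2^36 - 1 = 0xfffffffff.  For addr == 0xfffffffd0 (16-byte
 * aligned), count == 3 passes (last byte 0xfffffffff), while count == 4
 * fails because the last byte, 0x100000000f, is beyond maxphyaddr.
 */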
static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
					vmcs12->vm_exit_msr_load_addr) ||
	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
					vmcs12->vm_exit_msr_store_addr))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						      struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
					vmcs12->vm_entry_msr_load_addr))
		return -EINVAL;

	return 0;
}
static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (!nested_cpu_has_ept(vmcs12) ||
	    !page_address_valid(vcpu, vmcs12->pml_address))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	    !nested_cpu_has_ept(vmcs12))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	    !nested_cpu_has_ept(vmcs12))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
	    !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
		return -EINVAL;

	return 0;
}
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
		return -EINVAL;
	if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
	    e->index == MSR_IA32_UCODE_REV)
		return -EINVAL;
	if (e->reserved != 0)
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (e->index == MSR_FS_BASE ||
	    e->index == MSR_GS_BASE ||
	    e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}
/*
 * Load guest's/host's msr at nested entry/exit.
 * Returns 0 for success, and the (1-based) index of the failing entry
 * otherwise.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	struct msr_data msr;

	msr.host_initiated = false;
	for (i = 0; i < count; i++) {
		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		msr.index = e.index;
		msr.data = e.value;
		if (kvm_set_msr(vcpu, &msr)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	return i + 1;
}
static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;

	for (i = 0; i < count; i++) {
		struct msr_data msr_info;

		if (kvm_vcpu_read_guest(vcpu,
					gpa + i * sizeof(e),
					&e, 2 * sizeof(u32))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			return -EINVAL;
		}
		if (nested_vmx_store_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			return -EINVAL;
		}
		msr_info.host_initiated = false;
		msr_info.index = e.index;
		if (kvm_get_msr(vcpu, &msr_info)) {
			pr_debug_ratelimited(
				"%s cannot read MSR (%u, 0x%x)\n",
				__func__, i, e.index);
			return -EINVAL;
		}
		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
					     offsetof(struct vmx_msr_entry, value),
					 &msr_info.data, sizeof(msr_info.data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, msr_info.data);
			return -EINVAL;
		}
	}
	return 0;
}
static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long invalid_mask;

	invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	return (val & invalid_mask) == 0;
}
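
/*
 * Worked example (illustrative, not part of the original source): with
 * cpuid_maxphyaddr(vcpu) == 48, invalid_mask == 0xffff000000000000, so any
 * CR3 value with address bits above bit 47 set is rejected.
 */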
/*
 * Load guest's/host's cr3 at nested entry/exit.  @nested_ept is true if we
 * are emulating VM-entry into a guest with EPT enabled.
 * Returns 0 on success, 1 on failure.  The invalid-state exit qualification
 * code is assigned to *entry_failure_code on failure.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
			       u32 *entry_failure_code)
{
	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
		if (!nested_cr3_valid(vcpu, cr3)) {
			*entry_failure_code = ENTRY_FAIL_DEFAULT;
			return 1;
		}

		/*
		 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
		 * must not be dereferenced.
		 */
		if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
		    !nested_ept) {
			if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
				*entry_failure_code = ENTRY_FAIL_PDPTE;
				return 1;
			}
		}
	}

	if (!nested_ept)
		kvm_mmu_new_cr3(vcpu, cr3, false);

	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);

	kvm_init_mmu(vcpu, false);

	return 0;
}
/*
 * Returns true if KVM is able to configure the CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated
 * by L1.
 *
 * If L1 uses EPT, then TLB entries are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return nested_cpu_has_ept(vmcs12) ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}

static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}
static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}
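
/*
 * Worked example (illustrative, not part of the original source):
 * is_bitwise_subset(0b1010, 0b0010, 0xf) is true because
 * 0b1010 | 0b0010 == 0b1010, i.e. every masked bit set in the subset is
 * also set in the superset; is_bitwise_subset(0b1010, 0b0110, 0xf) is
 * false because bit 2 is set only in the subset.
 */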
static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.msrs.basic;

	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}
static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		lowp = &vmx->nested.msrs.pinbased_ctls_low;
		highp = &vmx->nested.msrs.pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		lowp = &vmx->nested.msrs.procbased_ctls_low;
		highp = &vmx->nested.msrs.procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		lowp = &vmx->nested.msrs.exit_ctls_low;
		highp = &vmx->nested.msrs.exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		lowp = &vmx->nested.msrs.entry_ctls_low;
		highp = &vmx->nested.msrs.entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		lowp = &vmx->nested.msrs.secondary_ctls_low;
		highp = &vmx->nested.msrs.secondary_ctls_high;
		break;
	default:
		BUG();
	}

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}
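
/*
 * Worked example (illustrative, not part of the original source): per SDM
 * Vol 3 A.3, a VMX control capability MSR packs the "allowed 0-settings"
 * in bits 31:0 (a set bit means the control must be 1) and the "allowed
 * 1-settings" in bits 63:32 (a clear bit means the control must be 0).  If
 * the supported value is vmx_control_msr(0x3, 0x7) == 0x0000000700000003,
 * restoring 0x0000000300000003 is accepted (bits 0-1 are still must-be-1,
 * and the high half 0x3 is a subset of 0x7), while 0x0000000f00000003 is
 * rejected because bit 3 of the high half was never allowed to be 1.
 */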
static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved_bits =
		/* feature */
		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
		/* reserved */
		GENMASK_ULL(13, 9) | BIT_ULL(31);
	u64 vmx_misc;

	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				   vmx->nested.msrs.misc_high);

	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	/*
	 * If L1 has read-only VM-exit information fields, use the
	 * less permissive vmx_vmwrite_bitmap to specify write
	 * permissions for the shadow VMCS.
	 */
	if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));

	return 0;
}
static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap;

	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
					   vmx->nested.msrs.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}
static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 *msr;

	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		msr = &vmx->nested.msrs.cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		msr = &vmx->nested.msrs.cr4_fixed0;
		break;
	default:
		BUG();
	}

	/*
	 * 1 bits (which indicate bits that "must be 1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*msr = data;
	return 0;
}
/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Don't allow changes to the VMX capability MSRs while the vCPU
	 * is in VMX operation.
	 */
	if (vmx->nested.vmxon)
		return -EBUSY;

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
		 * DEFAULT SETTINGS".
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		vmx->nested.msrs.vmcs_enum = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}
/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = msrs->basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->pinbased_ctls_low,
			msrs->pinbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->procbased_ctls_low,
			msrs->procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			msrs->exit_ctls_low,
			msrs->exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			msrs->entry_ctls_low,
			msrs->entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			msrs->misc_low,
			msrs->misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = msrs->cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = msrs->cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = msrs->cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = msrs->cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = msrs->vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			msrs->secondary_ctls_low,
			msrs->secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = msrs->ept_caps |
			((u64)msrs->vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = msrs->vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}
/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case
 * they have been modified by the L1 guest.  Note that the "read-only"
 * VM-exit information fields are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS."
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
	const u16 *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	int i, q;
	unsigned long field;
	u64 field_value;
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

	preempt_disable();

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			field_value = __vmcs_readl(field);
			vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
		}
		/*
		 * Skip the VM-exit information fields if they are read-only.
		 */
		if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
			break;
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);

	preempt_enable();
}
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
	const u16 *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	int i, q;
	unsigned long field;
	u64 field_value = 0;
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
			__vmcs_writel(field, field_value);
		}
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);
}
static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
	vmcs12->tpr_threshold = evmcs->tpr_threshold;
	vmcs12->guest_rip = evmcs->guest_rip;

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
		vmcs12->guest_rsp = evmcs->guest_rsp;
		vmcs12->guest_rflags = evmcs->guest_rflags;
		vmcs12->guest_interruptibility_info =
			evmcs->guest_interruptibility_info;
	}
	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->cpu_based_vm_exec_control =
			evmcs->cpu_based_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
		vmcs12->exception_bitmap = evmcs->exception_bitmap;
	}
	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
		vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
		vmcs12->vm_entry_intr_info_field =
			evmcs->vm_entry_intr_info_field;
		vmcs12->vm_entry_exception_error_code =
			evmcs->vm_entry_exception_error_code;
		vmcs12->vm_entry_instruction_len =
			evmcs->vm_entry_instruction_len;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
		vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
		vmcs12->host_cr0 = evmcs->host_cr0;
		vmcs12->host_cr3 = evmcs->host_cr3;
		vmcs12->host_cr4 = evmcs->host_cr4;
		vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
		vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
		vmcs12->host_rip = evmcs->host_rip;
		vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
		vmcs12->host_es_selector = evmcs->host_es_selector;
		vmcs12->host_cs_selector = evmcs->host_cs_selector;
		vmcs12->host_ss_selector = evmcs->host_ss_selector;
		vmcs12->host_ds_selector = evmcs->host_ds_selector;
		vmcs12->host_fs_selector = evmcs->host_fs_selector;
		vmcs12->host_gs_selector = evmcs->host_gs_selector;
		vmcs12->host_tr_selector = evmcs->host_tr_selector;
	}
	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
		vmcs12->pin_based_vm_exec_control =
			evmcs->pin_based_vm_exec_control;
		vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
		vmcs12->secondary_vm_exec_control =
			evmcs->secondary_vm_exec_control;
	}
	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
		vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
		vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
		vmcs12->msr_bitmap = evmcs->msr_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
		vmcs12->guest_es_base = evmcs->guest_es_base;
		vmcs12->guest_cs_base = evmcs->guest_cs_base;
		vmcs12->guest_ss_base = evmcs->guest_ss_base;
		vmcs12->guest_ds_base = evmcs->guest_ds_base;
		vmcs12->guest_fs_base = evmcs->guest_fs_base;
		vmcs12->guest_gs_base = evmcs->guest_gs_base;
		vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
		vmcs12->guest_tr_base = evmcs->guest_tr_base;
		vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
		vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
		vmcs12->guest_es_limit = evmcs->guest_es_limit;
		vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
		vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
		vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
		vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
		vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
		vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
		vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
		vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
		vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
		vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
		vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
		vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
		vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
		vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
		vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
		vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
		vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
		vmcs12->guest_es_selector = evmcs->guest_es_selector;
		vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
		vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
		vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
		vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
		vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
		vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
		vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
		vmcs12->tsc_offset = evmcs->tsc_offset;
		vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
		vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
		vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
		vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
		vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
		vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
		vmcs12->guest_cr0 = evmcs->guest_cr0;
		vmcs12->guest_cr3 = evmcs->guest_cr3;
		vmcs12->guest_cr4 = evmcs->guest_cr4;
		vmcs12->guest_dr7 = evmcs->guest_dr7;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
		vmcs12->host_fs_base = evmcs->host_fs_base;
		vmcs12->host_gs_base = evmcs->host_gs_base;
		vmcs12->host_tr_base = evmcs->host_tr_base;
		vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
		vmcs12->host_idtr_base = evmcs->host_idtr_base;
		vmcs12->host_rsp = evmcs->host_rsp;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
		vmcs12->ept_pointer = evmcs->ept_pointer;
		vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
		vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
		vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
		vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
		vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
		vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
		vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
		vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
		vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
		vmcs12->guest_pending_dbg_exceptions =
			evmcs->guest_pending_dbg_exceptions;
		vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
		vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
		vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
		vmcs12->guest_activity_state = evmcs->guest_activity_state;
		vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
	}
	/*
	 * Not used in KVM:
	 *
	 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
	 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
	 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
	 * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
	 * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
	 * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
	 * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
	 * vmcs12->page_fault_error_code_mask =
	 *		evmcs->page_fault_error_code_mask;
	 * vmcs12->page_fault_error_code_match =
	 *		evmcs->page_fault_error_code_match;
	 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
	 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
	 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
	 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
	 *
	 * Read-only VM-exit information fields:
	 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
	 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
	 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
	 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
	 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
	 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
	 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
	 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
	 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
	 * vmcs12->exit_qualification = evmcs->exit_qualification;
	 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
	 *
	 * Not present in struct vmcs12:
	 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
	 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
	 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
	 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
	 */

	return 0;
}
static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/*
	 * Should not be changed by KVM:
	 *
	 * evmcs->host_es_selector = vmcs12->host_es_selector;
	 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
	 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
	 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
	 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
	 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
	 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
	 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
	 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
	 * evmcs->host_cr0 = vmcs12->host_cr0;
	 * evmcs->host_cr3 = vmcs12->host_cr3;
	 * evmcs->host_cr4 = vmcs12->host_cr4;
	 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
	 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
	 * evmcs->host_rip = vmcs12->host_rip;
	 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
	 * evmcs->host_fs_base = vmcs12->host_fs_base;
	 * evmcs->host_gs_base = vmcs12->host_gs_base;
	 * evmcs->host_tr_base = vmcs12->host_tr_base;
	 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
	 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
	 * evmcs->host_rsp = vmcs12->host_rsp;
	 * sync_vmcs12() doesn't read these:
	 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
	 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
	 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
	 * evmcs->ept_pointer = vmcs12->ept_pointer;
	 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
	 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
	 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
	 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
	 * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
	 * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
	 * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
	 * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
	 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
	 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
	 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
	 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
	 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
	 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
	 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
	 * evmcs->page_fault_error_code_mask =
	 *		vmcs12->page_fault_error_code_mask;
	 * evmcs->page_fault_error_code_match =
	 *		vmcs12->page_fault_error_code_match;
	 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
	 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
	 * evmcs->tsc_offset = vmcs12->tsc_offset;
	 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
	 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
	 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
	 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
	 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
	 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
	 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
	 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
	 *
	 * Not present in struct vmcs12:
	 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
	 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
	 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
	 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
	 */
	evmcs->guest_es_selector = vmcs12->guest_es_selector;
	evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
	evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
	evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
	evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
	evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
	evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
	evmcs->guest_tr_selector = vmcs12->guest_tr_selector;

	evmcs->guest_es_limit = vmcs12->guest_es_limit;
	evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
	evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
	evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
	evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
	evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
	evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
	evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
	evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
	evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;

	evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
	evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
	evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
	evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
	evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
	evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
	evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
	evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;

	evmcs->guest_es_base = vmcs12->guest_es_base;
	evmcs->guest_cs_base = vmcs12->guest_cs_base;
	evmcs->guest_ss_base = vmcs12->guest_ss_base;
	evmcs->guest_ds_base = vmcs12->guest_ds_base;
	evmcs->guest_fs_base = vmcs12->guest_fs_base;
	evmcs->guest_gs_base = vmcs12->guest_gs_base;
	evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
	evmcs->guest_tr_base = vmcs12->guest_tr_base;
	evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
	evmcs->guest_idtr_base = vmcs12->guest_idtr_base;

	evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
	evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;

	evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
	evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
	evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
	evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;

	evmcs->guest_pending_dbg_exceptions =
		vmcs12->guest_pending_dbg_exceptions;
	evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
	evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;

	evmcs->guest_activity_state = vmcs12->guest_activity_state;
	evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;

	evmcs->guest_cr0 = vmcs12->guest_cr0;
	evmcs->guest_cr3 = vmcs12->guest_cr3;
	evmcs->guest_cr4 = vmcs12->guest_cr4;
	evmcs->guest_dr7 = vmcs12->guest_dr7;

	evmcs->guest_physical_address = vmcs12->guest_physical_address;

	evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
	evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
	evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
	evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
	evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
	evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
	evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
	evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;

	evmcs->exit_qualification = vmcs12->exit_qualification;

	evmcs->guest_linear_address = vmcs12->guest_linear_address;
	evmcs->guest_rsp = vmcs12->guest_rsp;
	evmcs->guest_rflags = vmcs12->guest_rflags;

	evmcs->guest_interruptibility_info =
		vmcs12->guest_interruptibility_info;
	evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
	evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
	evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
	evmcs->vm_entry_exception_error_code =
		vmcs12->vm_entry_exception_error_code;
	evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;

	evmcs->guest_rip = vmcs12->guest_rip;

	evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;

	return 0;
}
/*
 * This is an equivalent of the nested hypervisor executing the vmptrld
 * instruction.
 */
static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
						 bool from_launch)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct hv_vp_assist_page assist_page;

	if (likely(!vmx->nested.enlightened_vmcs_enabled))
		return 1;

	if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
		return 0;

	if (unlikely(!assist_page.enlighten_vmentry))
		return 0;

	if (unlikely(assist_page.current_nested_vmcs !=
		     vmx->nested.hv_evmcs_vmptr)) {

		if (!vmx->nested.hv_evmcs)
			vmx->nested.current_vmptr = -1ull;

		nested_release_evmcs(vcpu);

		vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page(
			vcpu, assist_page.current_nested_vmcs);

		if (unlikely(is_error_page(vmx->nested.hv_evmcs_page)))
			return 0;

		vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);

		/*
		 * Currently, KVM only supports eVMCS version 1
		 * (== KVM_EVMCS_VERSION), and thus we expect the guest to
		 * set the first u32 field of the eVMCS to this eVMCS
		 * version number.
		 *
		 * The guest should learn the eVMCS versions supported by the
		 * host by examining CPUID.0x4000000A.EAX[0:15]. The host
		 * userspace VMM is expected to set this CPUID leaf according
		 * to the value returned in vmcs_version from
		 * nested_enable_evmcs().
		 *
		 * However, it turns out that Microsoft Hyper-V fails to
		 * comply with its own invented interface: when Hyper-V uses
		 * eVMCS, it just sets the first u32 field of the eVMCS to
		 * the revision_id specified in MSR_IA32_VMX_BASIC instead of
		 * an eVMCS version number, i.e. one of the supported versions
		 * specified in CPUID.0x4000000A.EAX[0:15].
		 *
		 * To work around this Hyper-V bug, we accept here either a
		 * supported eVMCS version or the VMCS12 revision_id as valid
		 * values for the first u32 field of the eVMCS.
		 */
		if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
		    (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
			nested_release_evmcs(vcpu);
			return 0;
		}

		vmx->nested.dirty_vmcs12 = true;
		/*
		 * As we keep L2 state for one guest only, the
		 * 'hv_clean_fields' mask can't be used when we switch
		 * between guests. Reset it here for consistency.
		 */
		vmx->nested.hv_evmcs->hv_clean_fields &=
			~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
		vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;

		/*
		 * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
		 * reloaded from guest's memory (read only fields, fields not
		 * present in struct hv_enlightened_vmcs, ...). Make sure there
		 * are no leftovers.
		 */
		if (from_launch) {
			struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

			memset(vmcs12, 0, sizeof(*vmcs12));
			vmcs12->hdr.revision_id = VMCS12_REVISION;
		}
	}
	return 1;
}
void nested_sync_from_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * hv_evmcs may end up being not mapped after migration (when
	 * L2 was running), map it here to make sure vmcs12 changes are
	 * properly reflected.
	 */
	if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs)
		nested_vmx_handle_enlightened_vmptrld(vcpu, false);

	if (vmx->nested.hv_evmcs) {
		copy_vmcs12_to_enlightened(vmx);
		/* All fields are clean */
		vmx->nested.hv_evmcs->hv_clean_fields |=
			HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
	} else {
		copy_vmcs12_to_shadow(vmx);
	}

	vmx->nested.need_vmcs12_sync = false;
}
static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{
	struct vcpu_vmx *vmx =
		container_of(timer, struct vcpu_vmx, nested.preemption_timer);

	vmx->nested.preemption_timer_expired = true;
	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
	kvm_vcpu_kick(&vmx->vcpu);

	return HRTIMER_NORESTART;
}
static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
{
	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * A timer value of zero is architecturally guaranteed to cause
	 * a VMExit prior to executing any instructions in the guest.
	 */
	if (preemption_timeout == 0) {
		vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
		return;
	}

	if (vcpu->arch.virtual_tsc_khz == 0)
		return;

	preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
	preemption_timeout *= 1000000;
	do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
	hrtimer_start(&vmx->nested.preemption_timer,
		      ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
}
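
/*
 * Worked example (illustrative, not part of the original source): the
 * emulated timer ticks once per 2^5 = 32 TSC cycles (see
 * VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE).  With virtual_tsc_khz == 2000000
 * (a 2 GHz guest TSC) and vmx_preemption_timer_value == 1000:
 *
 *	1000 << 5        = 32000 TSC cycles
 *	32000 * 1000000  = 32000000000
 *	/ 2000000 (kHz)  = 16000 ns
 *
 * so the hrtimer fires after 16 us of guest time.
 */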
static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
	if (vmx->nested.nested_run_pending &&
	    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
		return vmcs12->guest_ia32_efer;
	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
		return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
	else
		return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
}
static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
{
	/*
	 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
	 * according to L0's settings (vmcs12 is irrelevant here).  Host
	 * fields that come from L0 and are not constant, e.g. HOST_CR3,
	 * will be set as needed prior to VMLAUNCH/VMRESUME.
	 */
	if (vmx->nested.vmcs02_initialized)
		return;
	vmx->nested.vmcs02_initialized = true;

	/*
	 * We don't care what the EPTP value is, we just need to guarantee
	 * it's valid so we don't get a false positive when doing early
	 * consistency checks.
	 */
	if (enable_ept && nested_early_check)
		vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));

	/* All VMFUNCs are currently emulated through L0 vmexits.  */
	if (cpu_has_vmx_vmfunc())
		vmcs_write64(VM_FUNCTION_CONTROL, 0);

	if (cpu_has_vmx_posted_intr())
		vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);

	if (cpu_has_vmx_msr_bitmap())
		vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));

	if (enable_pml)
		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));

	/*
	 * Set the MSR load/store lists to match L0's settings.  Only the
	 * addresses are constant (for vmcs02), the counts can change based
	 * on L2's behavior, e.g. switching to/from long mode.
	 */
	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));

	vmx_set_constant_host_state(vmx);
}
1960 static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx,
1961 struct vmcs12 *vmcs12)
1963 prepare_vmcs02_constant_state(vmx);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);

	if (enable_vpid) {
1968 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
1969 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
		else
			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
	}
}
1975 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
1977 u32 exec_control, vmcs12_exec_ctrl;
1978 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
1980 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
1981 prepare_vmcs02_early_full(vmx, vmcs12);
1984 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
1985 * entry, but only if the current (host) sp changed from the value
1986 * we wrote last (vmx->host_rsp). This cache is no longer relevant
1987 * if we switch vmcs, and rather than hold a separate cache per vmcs,
1988 * here we just force the write to happen on entry. host_rsp will
1989 * also be written unconditionally by nested_vmx_check_vmentry_hw()
	 * if we are doing early consistency checks via hardware.
	 */
	vmx->host_rsp = 0;
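	/*
	 * PIN CONTROLS
	 *
	 * The vmcs02 value is the union of L1's requests from vmcs12 and
	 * L0's required bits from vmcs_config; controls that L0 emulates,
	 * e.g. the preemption timer, are stripped below.
	 */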
1997 exec_control = vmcs12->pin_based_vm_exec_control;
1999 /* Preemption timer setting is computed directly in vmx_vcpu_run. */
2000 exec_control |= vmcs_config.pin_based_exec_ctrl;
2001 exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
2002 vmx->loaded_vmcs->hv_timer_armed = false;
2004 /* Posted interrupts setting is only taken from vmcs12. */
2005 if (nested_cpu_has_posted_intr(vmcs12)) {
2006 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2007 vmx->nested.pi_pending = false;
	} else {
		exec_control &= ~PIN_BASED_POSTED_INTR;
	}
	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);

	/*
	 * EXEC CONTROLS
	 */
2016 exec_control = vmx_exec_control(vmx); /* L0's desires */
2017 exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2018 exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
2019 exec_control &= ~CPU_BASED_TPR_SHADOW;
2020 exec_control |= vmcs12->cpu_based_vm_exec_control;
2023 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
2024 * nested_get_vmcs12_pages can't fix it up, the illegal value
2025 * will result in a VM entry failure.
2027 if (exec_control & CPU_BASED_TPR_SHADOW) {
2028 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
2029 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
	} else {
#ifdef CONFIG_X86_64
		exec_control |= CPU_BASED_CR8_LOAD_EXITING |
				CPU_BASED_CR8_STORE_EXITING;
#endif
	}

	/*
2038 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
2039 * for I/O port accesses.
2041 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2042 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2043 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
	/*
	 * SECONDARY EXEC CONTROLS
	 */
2048 if (cpu_has_secondary_exec_ctrls()) {
2049 exec_control = vmx->secondary_exec_control;
2051 /* Take the following fields only from vmcs12 */
2052 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2053 SECONDARY_EXEC_ENABLE_INVPCID |
2054 SECONDARY_EXEC_RDTSCP |
2055 SECONDARY_EXEC_XSAVES |
2056 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2057 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2058 SECONDARY_EXEC_ENABLE_VMFUNC);
2059 if (nested_cpu_has(vmcs12,
2060 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
2061 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
2062 ~SECONDARY_EXEC_ENABLE_PML;
			exec_control |= vmcs12_exec_ctrl;
		}
2066 /* VMCS shadowing for L2 is emulated for now */
2067 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2069 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2070 vmcs_write16(GUEST_INTR_STATUS,
2071 vmcs12->guest_intr_status);
2074 * Write an illegal value to APIC_ACCESS_ADDR. Later,
2075 * nested_get_vmcs12_pages will either fix it up or
2076 * remove the VM execution control.
2078 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
2079 vmcs_write64(APIC_ACCESS_ADDR, -1ull);
2081 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
2082 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
		vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
	}

	/*
	 * ENTRY CONTROLS
	 *
2090 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2091 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2092 * on the related bits (if supported by the CPU) in the hope that
2093 * we can avoid VMWrites during vmx_set_efer().
2095 exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
2096 ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
2097 if (cpu_has_load_ia32_efer()) {
2098 if (guest_efer & EFER_LMA)
2099 exec_control |= VM_ENTRY_IA32E_MODE;
2100 if (guest_efer != host_efer)
			exec_control |= VM_ENTRY_LOAD_IA32_EFER;
	}
	vm_entry_controls_init(vmx, exec_control);

	/*
	 * EXIT CONTROLS
	 *
2108 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2109 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2110 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2112 exec_control = vmx_vmexit_ctrl();
2113 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2114 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2115 vm_exit_controls_init(vmx, exec_control);
2118 * Conceptually we want to copy the PML address and index from
2119 * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
2120 * since we always flush the log on each vmexit and never change
2121 * the PML address (once set), this happens to be equivalent to
2122 * simply resetting the index in vmcs02.
	if (enable_pml)
		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
	/*
	 * Interrupt/Exception Fields
	 */
2130 if (vmx->nested.nested_run_pending) {
2131 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2132 vmcs12->vm_entry_intr_info_field);
2133 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2134 vmcs12->vm_entry_exception_error_code);
2135 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2136 vmcs12->vm_entry_instruction_len);
2137 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2138 vmcs12->guest_interruptibility_info);
2139 vmx->loaded_vmcs->nmi_known_unmasked =
2140 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
	} else {
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
	}
}
2146 static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2148 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2150 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2151 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2152 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2153 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2154 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2155 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2156 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2157 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2158 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2159 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2160 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2161 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2162 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2163 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2164 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2165 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2166 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2167 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2168 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2169 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2170 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2171 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2172 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2173 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2174 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2175 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2176 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2177 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2178 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2179 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2180 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2181 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2182 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2183 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2184 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2185 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2188 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2189 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2190 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2191 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2192 vmcs12->guest_pending_dbg_exceptions);
2193 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2194 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
		/*
		 * L1 may access the L2's PDPTRs, so save them to construct
		 * vmcs12.
		 */
		if (enable_ept) {
2201 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2202 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2203 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
			vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
		}
	}
2208 if (nested_cpu_has_xsaves(vmcs12))
2209 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2212 * Whether page-faults are trapped is determined by a combination of
2213 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
2214 * If enable_ept, L0 doesn't care about page faults and we should
2215 * set all of these to L1's desires. However, if !enable_ept, L0 does
2216 * care about (at least some) page faults, and because it is not easy
2217 * (if at all possible?) to merge L0 and L1's desires, we simply ask
2218 * to exit on each and every L2 page fault. This is done by setting
2219 * MASK=MATCH=0 and (see below) EB.PF=1.
2220 * Note that below we don't need special code to set EB.PF beyond the
2221 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2222 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2223 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2225 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
2226 enable_ept ? vmcs12->page_fault_error_code_mask : 0);
2227 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
2228 enable_ept ? vmcs12->page_fault_error_code_match : 0);
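	/*
	 * Worked example: with MASK == MATCH == 0, (PFEC & 0) == 0 holds
	 * for every page fault, so whether a #PF exits is decided purely
	 * by EB.PF as described above.
	 */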
2230 if (cpu_has_vmx_apicv()) {
2231 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2232 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2233 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2234 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2237 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2238 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2240 set_cr4_guest_host_mask(vmx);
2242 if (kvm_mpx_supported()) {
2243 if (vmx->nested.nested_run_pending &&
2244 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2245 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
		else
			vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
	}
}

/*
2252 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2253 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2254 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2255 * guest in a way that will both be appropriate to L1's requests, and our
2256 * needs. In addition to modifying the active vmcs (which is vmcs02), this
 * function also has necessary side effects, such as setting various
2258 * vcpu->arch fields.
2259 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
2260 * is assigned to entry_failure_code on failure.
2262 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2263 u32 *entry_failure_code)
2265 struct vcpu_vmx *vmx = to_vmx(vcpu);
2266 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2268 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) {
2269 prepare_vmcs02_full(vmx, vmcs12);
2270 vmx->nested.dirty_vmcs12 = false;
2274 * First, the fields that are shadowed. This must be kept in sync
2275 * with vmcs_shadow_fields.h.
2277 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2278 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2279 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2280 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2283 if (vmx->nested.nested_run_pending &&
2284 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2285 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2286 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
	} else {
		kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
		vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
	}
2291 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2293 vmx->nested.preemption_timer_expired = false;
2294 if (nested_cpu_has_preemption_timer(vmcs12))
2295 vmx_start_preemption_timer(vcpu);
2297 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2298 * bitwise-or of what L1 wants to trap for L2, and what we want to
2299 * trap. Note that CR0.TS also needs updating - we do this later.
2301 update_exception_bitmap(vcpu);
2302 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2303 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2305 if (vmx->nested.nested_run_pending &&
2306 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2307 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2308 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2309 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2310 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2313 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2315 if (kvm_has_tsc_control)
2316 decache_tsc_multiplier(vmx);
	 * There is no direct mapping between vpid02 and vpid12: vpid02 is
	 * per-vCPU for L0 and reused, while the value of vpid12 is changed
	 * with one INVVPID during nested vmentry. The vpid12 allocated by
	 * L1 for L2 does not influence the global bitmap (for vpid01 and
	 * vpid02 allocation), even if L1 spawns a lot of nested vCPUs.
2327 if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
2328 if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
2329 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
			__vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
		}
	} else {
		/*
		 * If L1 uses EPT, then L0 needs to execute INVEPT on
2335 * EPTP02 instead of EPTP01. Therefore, delay TLB
2336 * flush until vmcs02->eptp is fully updated by
2337 * KVM_REQ_LOAD_CR3. Note that this assumes
2338 * KVM_REQ_TLB_FLUSH is evaluated after
2339 * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
2345 if (nested_cpu_has_ept(vmcs12))
2346 nested_ept_init_mmu_context(vcpu);
2347 else if (nested_cpu_has2(vmcs12,
2348 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2349 vmx_flush_tlb(vcpu, true);
2352 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2353 * bits which we consider mandatory enabled.
2354 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * the specifications by L1; it's not enough to take
	 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
	 * have more bits than L1 expected.
	 */
2359 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2360 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2362 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2363 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2365 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2366 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2367 vmx_set_efer(vcpu, vcpu->arch.efer);
2370 * Guest state is invalid and unrestricted guest is disabled,
2371 * which means L1 attempted VMEntry to L2 with invalid state.
2374 if (vmx->emulation_required) {
		*entry_failure_code = ENTRY_FAIL_DEFAULT;
		return 1;
	}
	/* Load vmcs12->guest_cr3; paging is handled via either EPT or shadow page tables. */
2380 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
				entry_failure_code))
		return 1;

	if (!enable_ept)
		vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
2387 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
	kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);

	return 0;
}
2392 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2394 if (!nested_cpu_has_nmi_exiting(vmcs12) &&
	    nested_cpu_has_virtual_nmis(vmcs12))
		return -EINVAL;
2398 if (!nested_cpu_has_virtual_nmis(vmcs12) &&
	    nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
		return -EINVAL;

	return 0;
}
2405 static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
2407 struct vcpu_vmx *vmx = to_vmx(vcpu);
2408 int maxphyaddr = cpuid_maxphyaddr(vcpu);
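	/*
	 * Reminder of the EPTP layout being validated below: bits 2:0 are
	 * the memory type, bits 5:3 the page-walk length minus 1, bit 6
	 * the A/D-enable bit, and bits 11:7 are reserved; e.g. 0x5e
	 * encodes WB, a 4-level walk, and A/D enabled.
	 */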
2410 /* Check for memory type validity */
2411 switch (address & VMX_EPTP_MT_MASK) {
2412 case VMX_EPTP_MT_UC:
		if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
			return false;
		break;
2416 case VMX_EPTP_MT_WB:
		if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
			return false;
		break;
	default:
		return false;
	}
	/* Only a page-walk length of 4 is valid. */
	if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
		return false;
2428 /* Reserved bits should not be set */
	if (address >> maxphyaddr || ((address >> 7) & 0x1f))
		return false;
2432 /* AD, if set, should be supported */
2433 if (address & VMX_EPTP_AD_ENABLE_BIT) {
		if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
			return false;
	}

	return true;
}

/*
 * Checks related to VM-Execution Control Fields
 */
2444 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2445 struct vmcs12 *vmcs12)
2447 struct vcpu_vmx *vmx = to_vmx(vcpu);
2449 if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2450 vmx->nested.msrs.pinbased_ctls_low,
2451 vmx->nested.msrs.pinbased_ctls_high) ||
2452 !vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2453 vmx->nested.msrs.procbased_ctls_low,
				vmx->nested.msrs.procbased_ctls_high))
		return -EINVAL;
2457 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2458 !vmx_control_verify(vmcs12->secondary_vm_exec_control,
2459 vmx->nested.msrs.secondary_ctls_low,
				 vmx->nested.msrs.secondary_ctls_high))
		return -EINVAL;
2463 if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) ||
2464 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2465 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2466 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2467 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2468 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2469 nested_vmx_check_nmi_controls(vmcs12) ||
2470 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2471 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2472 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2473 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
	    (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
		return -EINVAL;
2477 if (nested_cpu_has_ept(vmcs12) &&
	    !valid_ept_address(vcpu, vmcs12->ept_pointer))
		return -EINVAL;
2481 if (nested_cpu_has_vmfunc(vmcs12)) {
2482 if (vmcs12->vm_function_control &
		    ~vmx->nested.msrs.vmfunc_controls)
			return -EINVAL;
2486 if (nested_cpu_has_eptp_switching(vmcs12)) {
2487 if (!nested_cpu_has_ept(vmcs12) ||
			    !page_address_valid(vcpu, vmcs12->eptp_list_address))
				return -EINVAL;
		}
	}

	return 0;
}

/*
 * Checks related to VM-Exit Control Fields
 */
2499 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
2500 struct vmcs12 *vmcs12)
2502 struct vcpu_vmx *vmx = to_vmx(vcpu);
2504 if (!vmx_control_verify(vmcs12->vm_exit_controls,
2505 vmx->nested.msrs.exit_ctls_low,
2506 vmx->nested.msrs.exit_ctls_high) ||
	    nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
		return -EINVAL;

	return 0;
}

/*
 * Checks related to VM-Entry Control Fields
 */
2516 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
2517 struct vmcs12 *vmcs12)
2519 struct vcpu_vmx *vmx = to_vmx(vcpu);
2521 if (!vmx_control_verify(vmcs12->vm_entry_controls,
2522 vmx->nested.msrs.entry_ctls_low,
				vmx->nested.msrs.entry_ctls_high))
		return -EINVAL;
2527 * From the Intel SDM, volume 3:
2528 * Fields relevant to VM-entry event injection must be set properly.
2529 * These fields are the VM-entry interruption-information field, the
2530 * VM-entry exception error code, and the VM-entry instruction length.
2532 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2533 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2534 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2535 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2536 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2537 bool should_have_error_code;
2538 bool urg = nested_cpu_has2(vmcs12,
2539 SECONDARY_EXEC_UNRESTRICTED_GUEST);
2540 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
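		/*
		 * Hardware exceptions push an error code only in protected
		 * mode; with unrestricted guest, L2 may be entered with
		 * CR0.PE clear, in which case no error code is expected
		 * even for vectors that normally have one.
		 */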
2542 /* VM-entry interruption-info field: interruption type */
2543 if (intr_type == INTR_TYPE_RESERVED ||
2544 (intr_type == INTR_TYPE_OTHER_EVENT &&
		     !nested_cpu_supports_monitor_trap_flag(vcpu)))
			return -EINVAL;
2548 /* VM-entry interruption-info field: vector */
2549 if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2550 (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
		    (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
			return -EINVAL;
2554 /* VM-entry interruption-info field: deliver error code */
2555 should_have_error_code =
2556 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2557 x86_exception_has_error_code(vector);
		if (has_error_code != should_have_error_code)
			return -EINVAL;
2561 /* VM-entry exception error code */
2562 if (has_error_code &&
		    vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
			return -EINVAL;
2566 /* VM-entry interruption-info field: reserved bits */
		if (intr_info & INTR_INFO_RESVD_BITS_MASK)
			return -EINVAL;
2570 /* VM-entry instruction length */
2571 switch (intr_type) {
2572 case INTR_TYPE_SOFT_EXCEPTION:
2573 case INTR_TYPE_SOFT_INTR:
2574 case INTR_TYPE_PRIV_SW_EXCEPTION:
2575 if ((vmcs12->vm_entry_instruction_len > 15) ||
2576 (vmcs12->vm_entry_instruction_len == 0 &&
			     !nested_cpu_has_zero_length_injection(vcpu)))
				return -EINVAL;
		}
	}
	if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
		return -EINVAL;

	return 0;
}
2588 static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
2589 struct vmcs12 *vmcs12)
2593 if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
2594 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
2595 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2597 if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2598 nested_check_vm_exit_controls(vcpu, vmcs12) ||
2599 nested_check_vm_entry_controls(vcpu, vmcs12))
2600 return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
2602 if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
2603 !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
2604 !nested_cr3_valid(vcpu, vmcs12->host_cr3))
2605 return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
2608 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2609 * IA32_EFER MSR must be 0 in the field for that register. In addition,
2610 * the values of the LMA and LME bits in the field must each be that of
2611 * the host address-space size VM-exit control.
2613 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2614 ia32e = (vmcs12->vm_exit_controls &
2615 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
2616 if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
2617 ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
2618 ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
			return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
	}

	return 0;
}
2625 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
2626 struct vmcs12 *vmcs12)
	int r = 0;
	struct vmcs12 *shadow;
	struct page *page;
	if (vmcs12->vmcs_link_pointer == -1ull)
		return 0;
	if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
		return -EINVAL;
2638 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
	if (is_error_page(page))
		return -EINVAL;
2643 shadow = kmap(page);
2644 if (shadow->hdr.revision_id != VMCS12_REVISION ||
	    shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
		r = -EINVAL;
	kunmap(page);
	kvm_release_page_clean(page);
	return r;
}
2652 static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu,
2653 struct vmcs12 *vmcs12,
2658 *exit_qual = ENTRY_FAIL_DEFAULT;
2660 if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
	    !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
		return 1;
2664 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
		*exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
		return 1;
	}

	/*
2670 * If the load IA32_EFER VM-entry control is 1, the following checks
2671 * are performed on the field for the IA32_EFER MSR:
2672 * - Bits reserved in the IA32_EFER MSR must be 0.
2673 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
2674 * the IA-32e mode guest VM-exit control. It must also be identical
	 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
	 * CR0.PG) is 1.
	 */
2678 if (to_vmx(vcpu)->nested.nested_run_pending &&
2679 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
2680 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
2681 if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
2682 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
2683 ((vmcs12->guest_cr0 & X86_CR0_PG) &&
		     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
			return 1;
	}
2688 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
2689 (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
	     (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
		return 1;

	return 0;
}
2696 static int __noclone nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
2698 struct vcpu_vmx *vmx = to_vmx(vcpu);
2699 unsigned long cr3, cr4;
	if (!nested_early_check)
		return 0;
2704 if (vmx->msr_autoload.host.nr)
2705 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2706 if (vmx->msr_autoload.guest.nr)
2707 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
	preempt_disable();

	vmx_prepare_switch_to_guest(vcpu);
2714 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
2715 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
	 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
2717 * there is no need to preserve other bits or save/restore the field.
2719 vmcs_writel(GUEST_RFLAGS, 0);
2721 vmcs_writel(HOST_RIP, vmx_early_consistency_check_return);
2723 cr3 = __get_current_cr3_fast();
2724 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
2725 vmcs_writel(HOST_CR3, cr3);
2726 vmx->loaded_vmcs->host_state.cr3 = cr3;
2729 cr4 = cr4_read_shadow();
2730 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
2731 vmcs_writel(HOST_CR4, cr4);
2732 vmx->loaded_vmcs->host_state.cr4 = cr4;
2735 vmx->__launched = vmx->loaded_vmcs->launched;
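	/*
	 * The asm below enters the guest just long enough for the CPU to
	 * run its VM-entry consistency checks: it VMWRITEs the current RSP
	 * into HOST_RSP, issues VMLAUNCH or VMRESUME based on "launched",
	 * and records VMFail in vmx->fail via SETBE; a VMEnter that passes
	 * the checks exits immediately to vmx_early_consistency_check_return.
	 */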
2739 __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
2740 "mov %%" _ASM_SP ", %c[host_rsp](%0)\n\t"
2742 /* Check if vmlaunch or vmresume is needed */
2743 "cmpl $0, %c[launched](%0)\n\t"
2745 __ex("vmlaunch") "\n\t"
2747 "1: " __ex("vmresume") "\n\t"
2749 /* Set vmx->fail accordingly */
2750 "setbe %c[fail](%0)\n\t"
2752 ".pushsection .rodata\n\t"
2753 ".global vmx_early_consistency_check_return\n\t"
2754 "vmx_early_consistency_check_return: " _ASM_PTR " 2b\n\t"
2757 : "c"(vmx), "d"((unsigned long)HOST_RSP),
2758 [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
2759 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
2760 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp))
2761 : "rax", "cc", "memory"
	vmcs_writel(HOST_RIP, vmx_return);

	preempt_enable();
2768 if (vmx->msr_autoload.host.nr)
2769 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2770 if (vmx->msr_autoload.guest.nr)
2771 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
	if (vmx->fail) {
		WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
			     VMXERR_ENTRY_INVALID_CONTROL_FIELD);
		vmx->fail = 0;
		return 1;
	}
	 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
	 */
	local_irq_enable();
2784 if (hw_breakpoint_active())
2785 set_debugreg(__this_cpu_read(cpu_dr7), 7);
2788 * A non-failing VMEntry means we somehow entered guest mode with
2789 * an illegal RIP, and that's just the tip of the iceberg. There
2790 * is no telling what memory has been modified or what state has
2791 * been exposed to unknown code. Hitting this all but guarantees
2792 * a (very critical) hardware issue.
2794 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
		  VMX_EXIT_REASONS_FAILED_VMENTRY));

	return 0;
}
2799 STACK_FRAME_NON_STANDARD(nested_vmx_check_vmentry_hw);
2802 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
2803 struct vmcs12 *vmcs12);
2805 static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
2807 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct page *page;
	u64 hpa;
2812 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
2814 * Translate L1 physical address to host physical
2815 * address for vmcs02. Keep the page pinned, so this
2816 * physical address remains valid. We keep a reference
2817 * to it so we can release it later.
2819 if (vmx->nested.apic_access_page) { /* shouldn't happen */
2820 kvm_release_page_dirty(vmx->nested.apic_access_page);
2821 vmx->nested.apic_access_page = NULL;
2823 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
2825 * If translation failed, no matter: This feature asks
2826 * to exit when accessing the given address, and if it
		 * can never be accessed, this feature won't do
		 * anything anyway.
		 */
2830 if (!is_error_page(page)) {
2831 vmx->nested.apic_access_page = page;
2832 hpa = page_to_phys(vmx->nested.apic_access_page);
2833 vmcs_write64(APIC_ACCESS_ADDR, hpa);
		} else {
			vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
					SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
		}
	}
2840 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
2841 if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
2842 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
2843 vmx->nested.virtual_apic_page = NULL;
2845 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr);
2848 * If translation failed, VM entry will fail because
2849 * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
2850 * Failing the vm entry is _not_ what the processor
2851 * does but it's basically the only possibility we
2852 * have. We could still enter the guest if CR8 load
2853 * exits are enabled, CR8 store exits are enabled, and
2854 * virtualize APIC access is disabled; in this case
2855 * the processor would never use the TPR shadow and we
2856 * could simply clear the bit from the execution
2857 * control. But such a configuration is useless, so
2858 * let's keep the code simple.
2860 if (!is_error_page(page)) {
2861 vmx->nested.virtual_apic_page = page;
2862 hpa = page_to_phys(vmx->nested.virtual_apic_page);
			vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
		}
	}
2867 if (nested_cpu_has_posted_intr(vmcs12)) {
2868 if (vmx->nested.pi_desc_page) { /* shouldn't happen */
2869 kunmap(vmx->nested.pi_desc_page);
2870 kvm_release_page_dirty(vmx->nested.pi_desc_page);
2871 vmx->nested.pi_desc_page = NULL;
2873 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
		if (is_error_page(page))
			return;
2876 vmx->nested.pi_desc_page = page;
2877 vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
2878 vmx->nested.pi_desc =
2879 (struct pi_desc *)((void *)vmx->nested.pi_desc +
			(unsigned long)(vmcs12->posted_intr_desc_addr &
			(PAGE_SIZE - 1)));
2882 vmcs_write64(POSTED_INTR_DESC_ADDR,
2883 page_to_phys(vmx->nested.pi_desc_page) +
			(unsigned long)(vmcs12->posted_intr_desc_addr &
			(PAGE_SIZE - 1)));
	}
2887 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
2888 vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
2889 CPU_BASED_USE_MSR_BITMAPS);
	else
		vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
				CPU_BASED_USE_MSR_BITMAPS);
}

/*
2896 * Intel's VMX Instruction Reference specifies a common set of prerequisites
2897 * for running VMX instructions (except VMXON, whose prerequisites are
2898 * slightly different). It also specifies what exception to inject otherwise.
2899 * Note that many of these exceptions have priority over VM exits, so they
2900 * don't have to be checked again here.
2902 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
2904 if (!to_vmx(vcpu)->nested.vmxon) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}
2909 if (vmx_get_cpl(vcpu)) {
		kvm_inject_gp(vcpu, 0);
		return 0;
	}

	return 1;
}
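/*
 * A pending interrupt is deliverable only if the priority class in the
 * high nibble of RVI exceeds the processor priority in VPPR; e.g. an RVI
 * of 0x51 beats a VPPR of 0x40 but not a VPPR of 0x50.
 */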
2917 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
2919 u8 rvi = vmx_get_rvi();
2920 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
2922 return ((rvi & 0xf0) > (vppr & 0xf0));
2925 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
2926 struct vmcs12 *vmcs12);
2929 * If from_vmentry is false, this is being called from state restore (either RSM
2930 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
 *
 * Returns:
 *   0 - success, i.e. proceed with actual VMEnter
 *   1 - consistency check VMExit
 *  -1 - consistency check VMFail
 */
2937 int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
2939 struct vcpu_vmx *vmx = to_vmx(vcpu);
2940 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2941 bool evaluate_pending_interrupts;
	u32 exit_reason = EXIT_REASON_INVALID_STATE;
	u32 exit_qual;
2945 evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
2946 (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
2947 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
2948 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
2950 if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
2951 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
2952 if (kvm_mpx_supported() &&
2953 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2954 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
2956 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
2958 prepare_vmcs02_early(vmx, vmcs12);
	if (from_vmentry) {
		nested_get_vmcs12_pages(vcpu);
2963 if (nested_vmx_check_vmentry_hw(vcpu)) {
			vmx_switch_vmcs(vcpu, &vmx->vmcs01);
			return -1;
		}
2968 if (nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
			goto vmentry_fail_vmexit;
	}
2972 enter_guest_mode(vcpu);
2973 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
2974 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
2976 if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
2977 goto vmentry_fail_vmexit_guest_mode;
	if (from_vmentry) {
		exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
2981 exit_qual = nested_vmx_load_msr(vcpu,
2982 vmcs12->vm_entry_msr_load_addr,
2983 vmcs12->vm_entry_msr_load_count);
		if (exit_qual)
			goto vmentry_fail_vmexit_guest_mode;
	} else {
		/*
2988 * The MMU is not initialized to point at the right entities yet and
2989 * "get pages" would need to read data from the guest (i.e. we will
2990 * need to perform gpa to hpa translation). Request a call
2991 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
2992 * have already been set at vmentry time and should not be reset.
		kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
	}

	/*
2998 * If L1 had a pending IRQ/NMI until it executed
2999 * VMLAUNCH/VMRESUME which wasn't delivered because it was
3000 * disallowed (e.g. interrupts disabled), L0 needs to
3001 * evaluate if this pending event should cause an exit from L2
	 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
	 * intercept EXTERNAL_INTERRUPT).
	 *
3005 * Usually this would be handled by the processor noticing an
3006 * IRQ/NMI window request, or checking RVI during evaluation of
3007 * pending virtual interrupts. However, this setting was done
3008 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
3009 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
3011 if (unlikely(evaluate_pending_interrupts))
3012 kvm_make_request(KVM_REQ_EVENT, vcpu);
3015 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3016 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3017 * returned as far as L1 is concerned. It will only return (and set
	 * the success flag) when L2 exits (see nested_vmx_vmexit()).
	 */
	return 0;

	/*
3023 * A failed consistency check that leads to a VMExit during L1's
3024 * VMEnter to L2 is a variation of a normal VMexit, as explained in
3025 * 26.7 "VM-entry failures during or after loading guest state".
3027 vmentry_fail_vmexit_guest_mode:
3028 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
3029 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3030 leave_guest_mode(vcpu);
3032 vmentry_fail_vmexit:
	vmx_switch_vmcs(vcpu, &vmx->vmcs01);

	if (!from_vmentry)
		return 1;
3038 load_vmcs12_host_state(vcpu, vmcs12);
3039 vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
3040 vmcs12->exit_qualification = exit_qual;
3041 if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
		vmx->nested.need_vmcs12_sync = true;
	return 1;
}

/*
3047 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3048 * for running an L2 nested guest.
3050 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3052 struct vmcs12 *vmcs12;
3053 struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
	int ret;
	if (!nested_vmx_check_permission(vcpu))
		return 1;
	if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true))
		return 1;
3063 if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
3064 return nested_vmx_failInvalid(vcpu);
3066 vmcs12 = get_vmcs12(vcpu);
3069 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3070 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3071 * rather than RFLAGS.ZF, and no error number is stored to the
3072 * VM-instruction error field.
3074 if (vmcs12->hdr.shadow_vmcs)
3075 return nested_vmx_failInvalid(vcpu);
3077 if (vmx->nested.hv_evmcs) {
3078 copy_enlightened_to_vmcs12(vmx);
3079 /* Enlightened VMCS doesn't have launch state */
3080 vmcs12->launch_state = !launch;
3081 } else if (enable_shadow_vmcs) {
		copy_shadow_to_vmcs12(vmx);
	}

	/*
3086 * The nested entry process starts with enforcing various prerequisites
	 * on vmcs12 as required by the Intel SDM, and acts appropriately when
	 * they fail: as the SDM explains, some conditions should cause the
3089 * instruction to fail, while others will cause the instruction to seem
3090 * to succeed, but return an EXIT_REASON_INVALID_STATE.
3091 * To speed up the normal (success) code path, we should avoid checking
3092 * for misconfigurations which will anyway be caught by the processor
3093 * when using the merged vmcs02.
3095 if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
3096 return nested_vmx_failValid(vcpu,
3097 VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
3099 if (vmcs12->launch_state == launch)
3100 return nested_vmx_failValid(vcpu,
3101 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3102 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3104 ret = nested_vmx_check_vmentry_prereqs(vcpu, vmcs12);
	if (ret)
		return nested_vmx_failValid(vcpu, ret);
	 * We're finally done with prerequisite checking, and can start with
	 * the nested entry.
	 */
3112 vmx->nested.nested_run_pending = 1;
3113 ret = nested_vmx_enter_non_root_mode(vcpu, true);
3114 vmx->nested.nested_run_pending = !ret;
	if (ret > 0)
		return 1;
	else if (ret)
		return nested_vmx_failValid(vcpu,
3119 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3121 /* Hide L1D cache contents from the nested guest. */
3122 vmx->vcpu.arch.l1tf_flush_l1d = true;
3125 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3126 * also be used as part of restoring nVMX state for
3127 * snapshot restore (migration).
	 * In this flow, it is assumed that the vmcs12 cache was
	 * transferred as part of the captured nVMX state and should
	 * therefore not be read from guest memory (which may not
	 * exist on the destination host yet).
	 */
3134 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3137 * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
3138 * by event injection, halt vcpu.
3140 if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
3141 !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) {
3142 vmx->nested.nested_run_pending = 0;
		return kvm_vcpu_halt(vcpu);
	}
	return 1;
}

/*
3149 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
3151 * This function returns the new value we should put in vmcs12.guest_cr0.
3152 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3153 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3154 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3155 * didn't trap the bit, because if L1 did, so would L0).
3156 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3157 * been modified by L2, and L1 knows it. So just leave the old value of
3158 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3159 * isn't relevant, because if L0 traps this bit it can set it to anything.
3160 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3161 * changed these bits, and therefore they need to be updated, but L0
3162 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3163 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3165 static inline unsigned long
3166 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3169 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3170 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3171 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3172 vcpu->arch.cr0_guest_owned_bits));
3175 static inline unsigned long
3176 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3179 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3180 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3181 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3182 vcpu->arch.cr4_guest_owned_bits));
3185 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
				      struct vmcs12 *vmcs12)
{
	u32 idt_vectoring;
	unsigned int nr;
3191 if (vcpu->arch.exception.injected) {
3192 nr = vcpu->arch.exception.nr;
3193 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3195 if (kvm_exception_is_soft(nr)) {
3196 vmcs12->vm_exit_instruction_len =
3197 vcpu->arch.event_exit_inst_len;
3198 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
		} else
			idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3202 if (vcpu->arch.exception.has_error_code) {
3203 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3204 vmcs12->idt_vectoring_error_code =
3205 vcpu->arch.exception.error_code;
3208 vmcs12->idt_vectoring_info_field = idt_vectoring;
3209 } else if (vcpu->arch.nmi_injected) {
3210 vmcs12->idt_vectoring_info_field =
3211 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3212 } else if (vcpu->arch.interrupt.injected) {
3213 nr = vcpu->arch.interrupt.nr;
3214 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3216 if (vcpu->arch.interrupt.soft) {
3217 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3218 vmcs12->vm_entry_instruction_len =
3219 vcpu->arch.event_exit_inst_len;
		} else
			idt_vectoring |= INTR_TYPE_EXT_INTR;
3223 vmcs12->idt_vectoring_info_field = idt_vectoring;
3228 static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
3230 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3234 * Don't need to mark the APIC access page dirty; it is never
3235 * written to by the CPU during APIC virtualization.
3238 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3239 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3240 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3243 if (nested_cpu_has_posted_intr(vmcs12)) {
3244 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3245 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3249 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int max_irr;
	void *vapic_page;
	u16 status;
	if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
		return;
3259 vmx->nested.pi_pending = false;
	if (!pi_test_and_clear_on(vmx->nested.pi_desc))
		return;
3263 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
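	/* The PIR holds 256 bits; find_last_bit() returns 256 if none are set. */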
3264 if (max_irr != 256) {
3265 vapic_page = kmap(vmx->nested.virtual_apic_page);
3266 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3267 vapic_page, &max_irr);
3268 kunmap(vmx->nested.virtual_apic_page);
3270 status = vmcs_read16(GUEST_INTR_STATUS);
3271 if ((u8)max_irr > ((u8)status & 0xff)) {
			status &= ~0xff;
			status |= (u8)max_irr;
			vmcs_write16(GUEST_INTR_STATUS, status);
		}
	}
3278 nested_mark_vmcs12_pages_dirty(vcpu);
3281 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3282 unsigned long exit_qual)
3284 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3285 unsigned int nr = vcpu->arch.exception.nr;
3286 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3288 if (vcpu->arch.exception.has_error_code) {
3289 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3290 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3293 if (kvm_exception_is_soft(nr))
3294 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3296 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3298 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3299 vmx_get_nmi_mask(vcpu))
3300 intr_info |= INTR_INFO_UNBLOCK_NMI;
3302 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3305 static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
3307 struct vcpu_vmx *vmx = to_vmx(vcpu);
3308 unsigned long exit_qual;
3309 bool block_nested_events =
3310 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
3312 if (vcpu->arch.exception.pending &&
3313 nested_vmx_check_exception(vcpu, &exit_qual)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
		return 0;
	}
3320 if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3321 vmx->nested.preemption_timer_expired) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
		return 0;
	}
3328 if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
3331 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
3332 NMI_VECTOR | INTR_TYPE_NMI_INTR |
3333 INTR_INFO_VALID_MASK, 0);
3335 * The NMI-triggered VM exit counts as injection:
3336 * clear this one and block further NMIs.
3338 vcpu->arch.nmi_pending = 0;
		vmx_set_nmi_mask(vcpu, true);
		return 0;
	}
3343 if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
3344 nested_exit_on_intr(vcpu)) {
		if (block_nested_events)
			return -EBUSY;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
		return 0;
	}
	vmx_complete_nested_posted_interrupt(vcpu);
	return 0;
}
3355 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
	ktime_t remaining =
		hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
	u64 value;
	if (ktime_to_ns(remaining) <= 0)
		return 0;
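	/*
	 * Inverse of the scaling in vmx_start_preemption_timer(); e.g.
	 * 16000 ns remaining with virtual_tsc_khz at 2000000 gives
	 * (16000 * 2000000 / 1000000) >> 5 = 1000 timer ticks.
	 */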
3364 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
3365 do_div(value, 1000000);
3366 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
3370 * Update the guest state fields of vmcs12 to reflect changes that
3371 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
 * VM-entry controls is also updated, since this is really a guest
 * state bit.)
 */
3375 static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3377 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
3378 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
3380 vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
3381 vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
3382 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
3384 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
3385 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
3386 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
3387 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
3388 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
3389 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
3390 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
3391 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
3392 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
3393 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
3394 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
3395 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
3396 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
3397 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
3398 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
3399 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
3400 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
3401 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
3402 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
3403 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
3404 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
3405 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
3406 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
3407 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
3408 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
3409 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
3410 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
3411 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
3412 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
3413 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
3414 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
3415 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
3416 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
3417 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
3418 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
3419 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
3421 vmcs12->guest_interruptibility_info =
3422 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
3423 vmcs12->guest_pending_dbg_exceptions =
3424 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
3425 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
3426 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
	else
		vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
3430 if (nested_cpu_has_preemption_timer(vmcs12)) {
3431 if (vmcs12->vm_exit_controls &
3432 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
3433 vmcs12->vmx_preemption_timer_value =
3434 vmx_get_preemption_timer_value(vcpu);
3435 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
3439 * In some cases (usually, nested EPT), L2 is allowed to change its
3440 * own CR3 without exiting. If it has changed it, we must keep it.
3441 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
3442 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
	 * Additionally, restore L2's PDPTRs to vmcs12.
	 */
	if (enable_ept) {
		vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
3448 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
3449 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
3450 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
		vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
	}
3454 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
3456 if (nested_cpu_has_vid(vmcs12))
3457 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
3459 vmcs12->vm_entry_controls =
3460 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
3461 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
3463 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
3464 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
3465 vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3468 /* TODO: These cannot have changed unless we have MSR bitmaps and
3469 * the relevant bit asks not to trap the change */
3470 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
3471 vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
3472 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
3473 vmcs12->guest_ia32_efer = vcpu->arch.efer;
3474 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
3475 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
3476 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
3477 if (kvm_mpx_supported())
3478 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3482 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
3483 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
3484 * and this function updates it to reflect the changes to the guest state while
3485 * L2 was running (and perhaps made some exits which were handled directly by L0
3486 * without going back to L1), and to reflect the exit reason.
3487 * Note that we do not have to copy here all VMCS fields, just those that
3488 * could have changed by the L2 guest or the exit - i.e., the guest-state and
3489 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
3490 * which already writes to vmcs12 directly.
3492 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
3493 u32 exit_reason, u32 exit_intr_info,
3494 unsigned long exit_qualification)
3496 /* update guest state fields: */
3497 sync_vmcs12(vcpu, vmcs12);
3499 /* update exit information fields: */
3501 vmcs12->vm_exit_reason = exit_reason;
3502 vmcs12->exit_qualification = exit_qualification;
3503 vmcs12->vm_exit_intr_info = exit_intr_info;
3505 vmcs12->idt_vectoring_info_field = 0;
3506 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3507 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
3509 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
3510 vmcs12->launch_state = 1;
3512 /* vm_entry_intr_info_field is cleared on exit. Emulate this
3513 * instead of reading the real value. */
3514 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
		 * Transfer the event that L0 or L1 may have wanted to inject into
3518 * L2 to IDT_VECTORING_INFO_FIELD.
3520 vmcs12_save_pending_event(vcpu, vmcs12);
3523 * According to spec, there's no need to store the guest's
3524 * MSRs if the exit is due to a VM-entry failure that occurs
3525 * during or after loading the guest state. Since this exit
3526 * does not fall in that category, we need to save the MSRs.
3528 if (nested_vmx_store_msr(vcpu,
3529 vmcs12->vm_exit_msr_store_addr,
3530 vmcs12->vm_exit_msr_store_count))
3531 nested_vmx_abort(vcpu,
					VMX_ABORT_SAVE_GUEST_MSR_FAIL);
	}

	/*
3536 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
3537 * preserved above and would only end up incorrectly in L1.
3539 vcpu->arch.nmi_injected = false;
3540 kvm_clear_exception_queue(vcpu);
3541 kvm_clear_interrupt_queue(vcpu);
 * Part of what we need to do when the nested L2 guest exits and we want to
 * run its L1 parent is to reset L1's guest state to the host state specified
 * in vmcs12.
3548 * This function is to be called not only on normal nested exit, but also on
3549 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
3550 * Failures During or After Loading Guest State").
3551 * This function should be called when the active VMCS is L1's (vmcs01).
3553 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3554 struct vmcs12 *vmcs12)
3556 struct kvm_segment seg;
3557 u32 entry_failure_code;
3559 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
3560 vcpu->arch.efer = vmcs12->host_ia32_efer;
3561 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3562 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
	else
		vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
3565 vmx_set_efer(vcpu, vcpu->arch.efer);
3567 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
3568 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
3569 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
3570 vmx_set_interrupt_shadow(vcpu, 0);
3573 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
3574 * actually changed, because vmx_set_cr0 refers to efer set above.
3576 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
3577 * (KVM doesn't change it);
3579 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3580 vmx_set_cr0(vcpu, vmcs12->host_cr0);
3582 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
3583 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3584 vmx_set_cr4(vcpu, vmcs12->host_cr4);
3586 nested_ept_uninit_mmu_context(vcpu);
3589 * Only PDPTE load can fail as the value of cr3 was checked on entry and
3590 * couldn't have changed.
3592 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
3593 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
	if (!enable_ept)
		vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
3599 * If vmcs01 doesn't use VPID, CPU flushes TLB on every
3600 * VMEntry/VMExit. Thus, no need to flush TLB.
3602 * If vmcs12 doesn't use VPID, L1 expects TLB to be
3603 * flushed on every VMEntry/VMExit.
3605 * Otherwise, we can preserve TLB entries as long as we are
3606 * able to tag L1 TLB entries differently than L2 TLB entries.
3608 * If vmcs12 uses EPT, we need to execute this flush on EPTP01
3609 * and therefore we request the TLB flush to happen only after VMCS EPTP
3610 * has been set by KVM_REQ_LOAD_CR3.
	if (enable_vpid &&
	    (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
3617 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
3618 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
3619 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
3620 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
3621 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
3622 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
3623 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
3625 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
3626 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
3627 vmcs_write64(GUEST_BNDCFGS, 0);
3629 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
3630 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
3631 vcpu->arch.pat = vmcs12->host_ia32_pat;
3633 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
3634 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
3635 vmcs12->host_ia32_perf_global_ctrl);
3637 /* Set L1 segment info according to Intel SDM
3638 27.5.2 Loading Host Segment and Descriptor-Table Registers */
3639 seg = (struct kvm_segment) {
3641 .limit = 0xFFFFFFFF,
3642 .selector = vmcs12->host_cs_selector,
3648 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
3652 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
3653 seg = (struct kvm_segment) {
3655 .limit = 0xFFFFFFFF,
3662 seg.selector = vmcs12->host_ds_selector;
3663 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
3664 seg.selector = vmcs12->host_es_selector;
3665 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
3666 seg.selector = vmcs12->host_ss_selector;
3667 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
3668 seg.selector = vmcs12->host_fs_selector;
3669 seg.base = vmcs12->host_fs_base;
3670 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
3671 seg.selector = vmcs12->host_gs_selector;
3672 seg.base = vmcs12->host_gs_base;
3673 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
3674 seg = (struct kvm_segment) {
3675 .base = vmcs12->host_tr_base,
3677 .selector = vmcs12->host_tr_selector,
3681 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
3683 kvm_set_dr(vcpu, 7, 0x400);
3684 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
3686 if (cpu_has_vmx_msr_bitmap())
3687 vmx_update_msr_bitmap(vcpu);
3689 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
3690 vmcs12->vm_exit_msr_load_count))
3691 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
3694 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
3696 struct shared_msr_entry *efer_msr;
3699 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
3700 return vmcs_read64(GUEST_IA32_EFER);
3702 if (cpu_has_load_ia32_efer())
3705 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
3706 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
3707 return vmx->msr_autoload.guest.val[i].value;
3710 efer_msr = find_msr_entry(vmx, MSR_EFER);
3712 return efer_msr->data;
3717 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
3719 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3720 struct vcpu_vmx *vmx = to_vmx(vcpu);
3721 struct vmx_msr_entry g, h;
3722 struct msr_data msr;
3726 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
3728 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
3730 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
3731 * as vmcs01.GUEST_DR7 contains a userspace defined value
3732 * and vcpu->arch.dr7 is not squirreled away before the
3733 * nested VMENTER (not worth adding a variable in nested_vmx).
3735 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
3736 kvm_set_dr(vcpu, 7, DR7_FIXED_1);
3738 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
3742 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
3743 * handle a variety of side effects to KVM's software model.
3745 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
3747 vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
3748 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
3750 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
3751 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
3753 nested_ept_uninit_mmu_context(vcpu);
3754 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3755 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
3758 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
3759 * from vmcs01 (if necessary). The PDPTRs are not loaded on
3760 * VMFail, like everything else we just need to ensure our
3761 * software model is up-to-date.
3763 ept_save_pdptrs(vcpu);
3765 kvm_mmu_reset_context(vcpu);
3767 if (cpu_has_vmx_msr_bitmap())
3768 vmx_update_msr_bitmap(vcpu);
3771 * This nasty bit of open coding is a compromise between blindly
3772 * loading L1's MSRs using the exit load lists (incorrect emulation
3773 * of VMFail), leaving the nested VM's MSRs in the software model
3774 * (incorrect behavior) and snapshotting the modified MSRs (too
3775 * expensive since the lists are unbound by hardware). For each
3776 * MSR that was (prematurely) loaded from the nested VMEntry load
3777 * list, reload it from the exit load list if it exists and differs
3778 * from the guest value. The intent is to stuff host state as
3779 * silently as possible, not to fully process the exit load list.
3781 msr.host_initiated = false;
3782 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
3783 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
3784 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
3785 pr_debug_ratelimited(
3786 "%s read MSR index failed (%u, 0x%08llx)\n",
3791 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
3792 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
3793 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
3794 pr_debug_ratelimited(
3795 "%s read MSR failed (%u, 0x%08llx)\n",
3799 if (h.index != g.index)
3801 if (h.value == g.value)
3804 if (nested_vmx_load_msr_check(vcpu, &h)) {
3805 pr_debug_ratelimited(
3806 "%s check failed (%u, 0x%x, 0x%x)\n",
3807 __func__, j, h.index, h.reserved);
3811 msr.index = h.index;
3813 if (kvm_set_msr(vcpu, &msr)) {
3814 pr_debug_ratelimited(
3815 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
3816 __func__, j, h.index, h.value);
3825 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
3829 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
3830 * and modify vmcs12 to make it see what it would expect to see there if
3831 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
3833 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
3834 u32 exit_intr_info, unsigned long exit_qualification)
3836 struct vcpu_vmx *vmx = to_vmx(vcpu);
3837 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3839 /* trying to cancel vmlaunch/vmresume is a bug */
3840 WARN_ON_ONCE(vmx->nested.nested_run_pending);
3842 leave_guest_mode(vcpu);
3844 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
3845 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3847 if (likely(!vmx->fail)) {
3848 if (exit_reason == -1)
3849 sync_vmcs12(vcpu, vmcs12);
3851 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
3852 exit_qualification);
3855 * Must happen outside of sync_vmcs12() as it will
3856 * also be used to capture vmcs12 cache as part of
3857 * capturing nVMX state for snapshot (migration).
3859 * Otherwise, this flush will dirty guest memory at a
3860 * point it is already assumed by user-space to be
3863 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
3866 * The only expected VM-instruction error is "VM entry with
3867 * invalid control field(s)." Anything else indicates a
3868 * problem with L0. And we should never get here with a
3869 * VMFail of any type if early consistency checks are enabled.
3871 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
3872 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3873 WARN_ON_ONCE(nested_early_check);
3876 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3878 /* Update any VMCS fields that might have changed while L2 ran */
3879 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
3880 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
3881 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
3883 if (kvm_has_tsc_control)
3884 decache_tsc_multiplier(vmx);
3886 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
3887 vmx->nested.change_vmcs01_virtual_apic_mode = false;
3888 vmx_set_virtual_apic_mode(vcpu);
3889 } else if (!nested_cpu_has_ept(vmcs12) &&
3890 nested_cpu_has2(vmcs12,
3891 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3892 vmx_flush_tlb(vcpu, true);
3895 /* This is needed for same reason as it was needed in prepare_vmcs02 */
3898 /* Unpin physical memory we referred to in vmcs02 */
3899 if (vmx->nested.apic_access_page) {
3900 kvm_release_page_dirty(vmx->nested.apic_access_page);
3901 vmx->nested.apic_access_page = NULL;
3903 if (vmx->nested.virtual_apic_page) {
3904 kvm_release_page_dirty(vmx->nested.virtual_apic_page);
3905 vmx->nested.virtual_apic_page = NULL;
3907 if (vmx->nested.pi_desc_page) {
3908 kunmap(vmx->nested.pi_desc_page);
3909 kvm_release_page_dirty(vmx->nested.pi_desc_page);
3910 vmx->nested.pi_desc_page = NULL;
3911 vmx->nested.pi_desc = NULL;
3915 * We are now running in L2, mmu_notifier will force to reload the
3916 * page's hpa for L2 vmcs. Need to reload it for L1 before entering L1.
3918 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
3920 if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
3921 vmx->nested.need_vmcs12_sync = true;
3923 /* in case we halted in L2 */
3924 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3926 if (likely(!vmx->fail)) {
3928 * TODO: SDM says that with acknowledge interrupt on
3929 * exit, bit 31 of the VM-exit interrupt information
3930 * (valid interrupt) is always set to 1 on
3931 * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
3932 * need kvm_cpu_has_interrupt(). See the commit
3933 * message for details.
3935 if (nested_exit_intr_ack_set(vcpu) &&
3936 exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
3937 kvm_cpu_has_interrupt(vcpu)) {
3938 int irq = kvm_cpu_get_interrupt(vcpu);
3940 vmcs12->vm_exit_intr_info = irq |
3941 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
3944 if (exit_reason != -1)
3945 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
3946 vmcs12->exit_qualification,
3947 vmcs12->idt_vectoring_info_field,
3948 vmcs12->vm_exit_intr_info,
3949 vmcs12->vm_exit_intr_error_code,
3952 load_vmcs12_host_state(vcpu, vmcs12);
3958 * After an early L2 VM-entry failure, we're now back
3959 * in L1 which thinks it just finished a VMLAUNCH or
3960 * VMRESUME instruction, so we need to set the failure
3961 * flag and the VM-instruction error field of the VMCS
3962 * accordingly, and skip the emulated instruction.
3964 (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3967 * Restore L1's host state to KVM's software model. We're here
3968 * because a consistency check was caught by hardware, which
3969 * means some amount of guest state has been propagated to KVM's
3970 * model and needs to be unwound to the host's state.
3972 nested_vmx_restore_host_state(vcpu);
3978 * Decode the memory-address operand of a vmx instruction, as recorded on an
3979 * exit caused by such an instruction (run by a guest hypervisor).
3980 * On success, returns 0. When the operand is invalid, returns 1 and throws
3983 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
3984 u32 vmx_instruction_info, bool wr, gva_t *ret)
3988 struct kvm_segment s;
3991 * According to Vol. 3B, "Information for VM Exits Due to Instruction
3992 * Execution", on an exit, vmx_instruction_info holds most of the
3993 * addressing components of the operand. Only the displacement part
3994 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
3995 * For how an actual address is calculated from all these components,
3996 * refer to Vol. 1, "Operand Addressing".
3998 int scaling = vmx_instruction_info & 3;
3999 int addr_size = (vmx_instruction_info >> 7) & 7;
4000 bool is_reg = vmx_instruction_info & (1u << 10);
4001 int seg_reg = (vmx_instruction_info >> 15) & 7;
4002 int index_reg = (vmx_instruction_info >> 18) & 0xf;
4003 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4004 int base_reg = (vmx_instruction_info >> 23) & 0xf;
4005 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
4008 kvm_queue_exception(vcpu, UD_VECTOR);
4012 /* Addr = segment_base + offset */
4013 /* offset = base + [index * scale] + displacement */
4014 off = exit_qualification; /* holds the displacement */
4016 off += kvm_register_read(vcpu, base_reg);
4018 off += kvm_register_read(vcpu, index_reg)<<scaling;
4019 vmx_get_segment(vcpu, &s, seg_reg);
4020 *ret = s.base + off;
4022 if (addr_size == 1) /* 32 bit */
4025 /* Checks for #GP/#SS exceptions. */
4027 if (is_long_mode(vcpu)) {
4028 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
4029 * non-canonical form. This is the only check on the memory
4030 * destination for long mode!
4032 exn = is_noncanonical_address(*ret, vcpu);
4033 } else if (is_protmode(vcpu)) {
4034 /* Protected mode: apply checks for segment validity in the
4036 * - segment type check (#GP(0) may be thrown)
4037 * - usability check (#GP(0)/#SS(0))
4038 * - limit check (#GP(0)/#SS(0))
4041 /* #GP(0) if the destination operand is located in a
4042 * read-only data segment or any code segment.
4044 exn = ((s.type & 0xa) == 0 || (s.type & 8));
4046 /* #GP(0) if the source operand is located in an
4047 * execute-only code segment
4049 exn = ((s.type & 0xa) == 8);
4051 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4054 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4056 exn = (s.unusable != 0);
4057 /* Protected mode: #GP(0)/#SS(0) if the memory
4058 * operand is outside the segment limit.
4060 exn = exn || (off + sizeof(u64) > s.limit);
4063 kvm_queue_exception_e(vcpu,
4064 seg_reg == VCPU_SREG_SS ?
4065 SS_VECTOR : GP_VECTOR,
4073 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
4076 struct x86_exception e;
4078 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4079 vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
4082 if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
4083 kvm_inject_page_fault(vcpu, &e);
4091 * Allocate a shadow VMCS and associate it with the currently loaded
4092 * VMCS, unless such a shadow VMCS already exists. The newly allocated
4093 * VMCS is also VMCLEARed, so that it is ready for use.
4095 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
4097 struct vcpu_vmx *vmx = to_vmx(vcpu);
4098 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
4101 * We should allocate a shadow vmcs for vmcs01 only when L1
4102 * executes VMXON and free it when L1 executes VMXOFF.
4103 * As it is invalid to execute VMXON twice, we shouldn't reach
4104 * here when vmcs01 already have an allocated shadow vmcs.
4106 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
4108 if (!loaded_vmcs->shadow_vmcs) {
4109 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
4110 if (loaded_vmcs->shadow_vmcs)
4111 vmcs_clear(loaded_vmcs->shadow_vmcs);
4113 return loaded_vmcs->shadow_vmcs;
4116 static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4118 struct vcpu_vmx *vmx = to_vmx(vcpu);
4121 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
4125 vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
4126 if (!vmx->nested.cached_vmcs12)
4127 goto out_cached_vmcs12;
4129 vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
4130 if (!vmx->nested.cached_shadow_vmcs12)
4131 goto out_cached_shadow_vmcs12;
4133 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
4134 goto out_shadow_vmcs;
4136 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
4137 HRTIMER_MODE_REL_PINNED);
4138 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
4140 vmx->nested.vpid02 = allocate_vpid();
4142 vmx->nested.vmcs02_initialized = false;
4143 vmx->nested.vmxon = true;
4147 kfree(vmx->nested.cached_shadow_vmcs12);
4149 out_cached_shadow_vmcs12:
4150 kfree(vmx->nested.cached_vmcs12);
4153 free_loaded_vmcs(&vmx->nested.vmcs02);
4160 * Emulate the VMXON instruction.
4161 * Currently, we just remember that VMX is active, and do not save or even
4162 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
4163 * do not currently need to store anything in that guest-allocated memory
4164 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that the their
4165 * argument is different from the VMXON pointer (which the spec says they do).
4167 static int handle_vmon(struct kvm_vcpu *vcpu)
4172 struct vcpu_vmx *vmx = to_vmx(vcpu);
4173 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
4174 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
4177 * The Intel VMX Instruction Reference lists a bunch of bits that are
4178 * prerequisite to running VMXON, most notably cr4.VMXE must be set to
4179 * 1 (see vmx_set_cr4() for when we allow the guest to set this).
4180 * Otherwise, we should fail with #UD. But most faulting conditions
4181 * have already been checked by hardware, prior to the VM-exit for
4182 * VMXON. We do test guest cr4.VMXE because processor CR4 always has
4183 * that bit set to 1 in non-root mode.
4185 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
4186 kvm_queue_exception(vcpu, UD_VECTOR);
4190 /* CPL=0 must be checked manually. */
4191 if (vmx_get_cpl(vcpu)) {
4192 kvm_inject_gp(vcpu, 0);
4196 if (vmx->nested.vmxon)
4197 return nested_vmx_failValid(vcpu,
4198 VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
4200 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
4201 != VMXON_NEEDED_FEATURES) {
4202 kvm_inject_gp(vcpu, 0);
4206 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4211 * The first 4 bytes of VMXON region contain the supported
4212 * VMCS revision identifier
4214 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
4215 * which replaces physical address width with 32
4217 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
4218 return nested_vmx_failInvalid(vcpu);
4220 page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
4221 if (is_error_page(page))
4222 return nested_vmx_failInvalid(vcpu);
4224 if (*(u32 *)kmap(page) != VMCS12_REVISION) {
4226 kvm_release_page_clean(page);
4227 return nested_vmx_failInvalid(vcpu);
4230 kvm_release_page_clean(page);
4232 vmx->nested.vmxon_ptr = vmptr;
4233 ret = enter_vmx_operation(vcpu);
4237 return nested_vmx_succeed(vcpu);
4240 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
4242 struct vcpu_vmx *vmx = to_vmx(vcpu);
4244 if (vmx->nested.current_vmptr == -1ull)
4247 if (enable_shadow_vmcs) {
4248 /* copy to memory all shadowed fields in case
4249 they were modified */
4250 copy_shadow_to_vmcs12(vmx);
4251 vmx->nested.need_vmcs12_sync = false;
4252 vmx_disable_shadow_vmcs(vmx);
4254 vmx->nested.posted_intr_nv = -1;
4256 /* Flush VMCS12 to guest memory */
4257 kvm_vcpu_write_guest_page(vcpu,
4258 vmx->nested.current_vmptr >> PAGE_SHIFT,
4259 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
4261 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
4263 vmx->nested.current_vmptr = -1ull;
4266 /* Emulate the VMXOFF instruction */
4267 static int handle_vmoff(struct kvm_vcpu *vcpu)
4269 if (!nested_vmx_check_permission(vcpu))
4272 return nested_vmx_succeed(vcpu);
4275 /* Emulate the VMCLEAR instruction */
4276 static int handle_vmclear(struct kvm_vcpu *vcpu)
4278 struct vcpu_vmx *vmx = to_vmx(vcpu);
4282 if (!nested_vmx_check_permission(vcpu))
4285 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4288 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
4289 return nested_vmx_failValid(vcpu,
4290 VMXERR_VMCLEAR_INVALID_ADDRESS);
4292 if (vmptr == vmx->nested.vmxon_ptr)
4293 return nested_vmx_failValid(vcpu,
4294 VMXERR_VMCLEAR_VMXON_POINTER);
4296 if (vmx->nested.hv_evmcs_page) {
4297 if (vmptr == vmx->nested.hv_evmcs_vmptr)
4298 nested_release_evmcs(vcpu);
4300 if (vmptr == vmx->nested.current_vmptr)
4301 nested_release_vmcs12(vcpu);
4303 kvm_vcpu_write_guest(vcpu,
4304 vmptr + offsetof(struct vmcs12,
4306 &zero, sizeof(zero));
4309 return nested_vmx_succeed(vcpu);
4312 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
4314 /* Emulate the VMLAUNCH instruction */
4315 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
4317 return nested_vmx_run(vcpu, true);
4320 /* Emulate the VMRESUME instruction */
4321 static int handle_vmresume(struct kvm_vcpu *vcpu)
4324 return nested_vmx_run(vcpu, false);
4327 static int handle_vmread(struct kvm_vcpu *vcpu)
4329 unsigned long field;
4331 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4332 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4334 struct vmcs12 *vmcs12;
4336 if (!nested_vmx_check_permission(vcpu))
4339 if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
4340 return nested_vmx_failInvalid(vcpu);
4342 if (!is_guest_mode(vcpu))
4343 vmcs12 = get_vmcs12(vcpu);
4346 * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
4347 * to shadowed-field sets the ALU flags for VMfailInvalid.
4349 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4350 return nested_vmx_failInvalid(vcpu);
4351 vmcs12 = get_shadow_vmcs12(vcpu);
4354 /* Decode instruction info and find the field to read */
4355 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4356 /* Read the field, zero-extended to a u64 field_value */
4357 if (vmcs12_read_any(vmcs12, field, &field_value) < 0)
4358 return nested_vmx_failValid(vcpu,
4359 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4362 * Now copy part of this value to register or memory, as requested.
4363 * Note that the number of bits actually copied is 32 or 64 depending
4364 * on the guest's mode (32 or 64 bit), not on the given field's length.
4366 if (vmx_instruction_info & (1u << 10)) {
4367 kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
4370 if (get_vmx_mem_address(vcpu, exit_qualification,
4371 vmx_instruction_info, true, &gva))
4373 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
4374 kvm_write_guest_virt_system(vcpu, gva, &field_value,
4375 (is_long_mode(vcpu) ? 8 : 4), NULL);
4378 return nested_vmx_succeed(vcpu);
4382 static int handle_vmwrite(struct kvm_vcpu *vcpu)
4384 unsigned long field;
4386 struct vcpu_vmx *vmx = to_vmx(vcpu);
4387 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4388 u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4390 /* The value to write might be 32 or 64 bits, depending on L1's long
4391 * mode, and eventually we need to write that into a field of several
4392 * possible lengths. The code below first zero-extends the value to 64
4393 * bit (field_value), and then copies only the appropriate number of
4394 * bits into the vmcs12 field.
4396 u64 field_value = 0;
4397 struct x86_exception e;
4398 struct vmcs12 *vmcs12;
4400 if (!nested_vmx_check_permission(vcpu))
4403 if (vmx->nested.current_vmptr == -1ull)
4404 return nested_vmx_failInvalid(vcpu);
4406 if (vmx_instruction_info & (1u << 10))
4407 field_value = kvm_register_readl(vcpu,
4408 (((vmx_instruction_info) >> 3) & 0xf));
4410 if (get_vmx_mem_address(vcpu, exit_qualification,
4411 vmx_instruction_info, false, &gva))
4413 if (kvm_read_guest_virt(vcpu, gva, &field_value,
4414 (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
4415 kvm_inject_page_fault(vcpu, &e);
4421 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4423 * If the vCPU supports "VMWRITE to any supported field in the
4424 * VMCS," then the "read-only" fields are actually read/write.
4426 if (vmcs_field_readonly(field) &&
4427 !nested_cpu_has_vmwrite_any_field(vcpu))
4428 return nested_vmx_failValid(vcpu,
4429 VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
4431 if (!is_guest_mode(vcpu))
4432 vmcs12 = get_vmcs12(vcpu);
4435 * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
4436 * to shadowed-field sets the ALU flags for VMfailInvalid.
4438 if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
4439 return nested_vmx_failInvalid(vcpu);
4440 vmcs12 = get_shadow_vmcs12(vcpu);
4443 if (vmcs12_write_any(vmcs12, field, field_value) < 0)
4444 return nested_vmx_failValid(vcpu,
4445 VMXERR_UNSUPPORTED_VMCS_COMPONENT);
4448 * Do not track vmcs12 dirty-state if in guest-mode
4449 * as we actually dirty shadow vmcs12 instead of vmcs12.
4451 if (!is_guest_mode(vcpu)) {
4453 #define SHADOW_FIELD_RW(x) case x:
4454 #include "vmcs_shadow_fields.h"
4456 * The fields that can be updated by L1 without a vmexit are
4457 * always updated in the vmcs02, the others go down the slow
4458 * path of prepare_vmcs02.
4462 vmx->nested.dirty_vmcs12 = true;
4467 return nested_vmx_succeed(vcpu);
4470 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
4472 vmx->nested.current_vmptr = vmptr;
4473 if (enable_shadow_vmcs) {
4474 vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
4475 SECONDARY_EXEC_SHADOW_VMCS);
4476 vmcs_write64(VMCS_LINK_POINTER,
4477 __pa(vmx->vmcs01.shadow_vmcs));
4478 vmx->nested.need_vmcs12_sync = true;
4480 vmx->nested.dirty_vmcs12 = true;
4483 /* Emulate the VMPTRLD instruction */
4484 static int handle_vmptrld(struct kvm_vcpu *vcpu)
4486 struct vcpu_vmx *vmx = to_vmx(vcpu);
4489 if (!nested_vmx_check_permission(vcpu))
4492 if (nested_vmx_get_vmptr(vcpu, &vmptr))
4495 if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
4496 return nested_vmx_failValid(vcpu,
4497 VMXERR_VMPTRLD_INVALID_ADDRESS);
4499 if (vmptr == vmx->nested.vmxon_ptr)
4500 return nested_vmx_failValid(vcpu,
4501 VMXERR_VMPTRLD_VMXON_POINTER);
4503 /* Forbid normal VMPTRLD if Enlightened version was used */
4504 if (vmx->nested.hv_evmcs)
4507 if (vmx->nested.current_vmptr != vmptr) {
4508 struct vmcs12 *new_vmcs12;
4511 page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
4512 if (is_error_page(page)) {
4514 * Reads from an unbacked page return all 1s,
4515 * which means that the 32 bits located at the
4516 * given physical address won't match the required
4517 * VMCS12_REVISION identifier.
4519 nested_vmx_failValid(vcpu,
4520 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4521 return kvm_skip_emulated_instruction(vcpu);
4523 new_vmcs12 = kmap(page);
4524 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
4525 (new_vmcs12->hdr.shadow_vmcs &&
4526 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
4528 kvm_release_page_clean(page);
4529 return nested_vmx_failValid(vcpu,
4530 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
4533 nested_release_vmcs12(vcpu);
4536 * Load VMCS12 from guest memory since it is not already
4539 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
4541 kvm_release_page_clean(page);
4543 set_current_vmptr(vmx, vmptr);
4546 return nested_vmx_succeed(vcpu);
4549 /* Emulate the VMPTRST instruction */
4550 static int handle_vmptrst(struct kvm_vcpu *vcpu)
4552 unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
4553 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4554 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
4555 struct x86_exception e;
4558 if (!nested_vmx_check_permission(vcpu))
4561 if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
4564 if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
4566 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
4567 if (kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr,
4568 sizeof(gpa_t), &e)) {
4569 kvm_inject_page_fault(vcpu, &e);
4572 return nested_vmx_succeed(vcpu);
4575 /* Emulate the INVEPT instruction */
4576 static int handle_invept(struct kvm_vcpu *vcpu)
4578 struct vcpu_vmx *vmx = to_vmx(vcpu);
4579 u32 vmx_instruction_info, types;
4582 struct x86_exception e;
4587 if (!(vmx->nested.msrs.secondary_ctls_high &
4588 SECONDARY_EXEC_ENABLE_EPT) ||
4589 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
4590 kvm_queue_exception(vcpu, UD_VECTOR);
4594 if (!nested_vmx_check_permission(vcpu))
4597 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4598 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4600 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
4602 if (type >= 32 || !(types & (1 << type)))
4603 return nested_vmx_failValid(vcpu,
4604 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4606 /* According to the Intel VMX instruction reference, the memory
4607 * operand is read even if it isn't needed (e.g., for type==global)
4609 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4610 vmx_instruction_info, false, &gva))
4612 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
4613 kvm_inject_page_fault(vcpu, &e);
4618 case VMX_EPT_EXTENT_GLOBAL:
4620 * TODO: track mappings and invalidate
4621 * single context requests appropriately
4623 case VMX_EPT_EXTENT_CONTEXT:
4624 kvm_mmu_sync_roots(vcpu);
4625 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4632 return nested_vmx_succeed(vcpu);
4635 static int handle_invvpid(struct kvm_vcpu *vcpu)
4637 struct vcpu_vmx *vmx = to_vmx(vcpu);
4638 u32 vmx_instruction_info;
4639 unsigned long type, types;
4641 struct x86_exception e;
4648 if (!(vmx->nested.msrs.secondary_ctls_high &
4649 SECONDARY_EXEC_ENABLE_VPID) ||
4650 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
4651 kvm_queue_exception(vcpu, UD_VECTOR);
4655 if (!nested_vmx_check_permission(vcpu))
4658 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4659 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
4661 types = (vmx->nested.msrs.vpid_caps &
4662 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
4664 if (type >= 32 || !(types & (1 << type)))
4665 return nested_vmx_failValid(vcpu,
4666 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4668 /* according to the intel vmx instruction reference, the memory
4669 * operand is read even if it isn't needed (e.g., for type==global)
4671 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
4672 vmx_instruction_info, false, &gva))
4674 if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
4675 kvm_inject_page_fault(vcpu, &e);
4678 if (operand.vpid >> 16)
4679 return nested_vmx_failValid(vcpu,
4680 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4682 vpid02 = nested_get_vpid02(vcpu);
4684 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
4685 if (!operand.vpid ||
4686 is_noncanonical_address(operand.gla, vcpu))
4687 return nested_vmx_failValid(vcpu,
4688 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4689 if (cpu_has_vmx_invvpid_individual_addr()) {
4690 __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
4691 vpid02, operand.gla);
4693 __vmx_flush_tlb(vcpu, vpid02, false);
4695 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
4696 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
4698 return nested_vmx_failValid(vcpu,
4699 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
4700 __vmx_flush_tlb(vcpu, vpid02, false);
4702 case VMX_VPID_EXTENT_ALL_CONTEXT:
4703 __vmx_flush_tlb(vcpu, vpid02, false);
4707 return kvm_skip_emulated_instruction(vcpu);
4710 return nested_vmx_succeed(vcpu);
4713 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
4714 struct vmcs12 *vmcs12)
4716 u32 index = vcpu->arch.regs[VCPU_REGS_RCX];
4718 bool accessed_dirty;
4719 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
4721 if (!nested_cpu_has_eptp_switching(vmcs12) ||
4722 !nested_cpu_has_ept(vmcs12))
4725 if (index >= VMFUNC_EPTP_ENTRIES)
4729 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
4730 &address, index * 8, 8))
4733 accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
4736 * If the (L2) guest does a vmfunc to the currently
4737 * active ept pointer, we don't have to do anything else
4739 if (vmcs12->ept_pointer != address) {
4740 if (!valid_ept_address(vcpu, address))
4743 kvm_mmu_unload(vcpu);
4744 mmu->ept_ad = accessed_dirty;
4745 mmu->mmu_role.base.ad_disabled = !accessed_dirty;
4746 vmcs12->ept_pointer = address;
4748 * TODO: Check what's the correct approach in case
4749 * mmu reload fails. Currently, we just let the next
4750 * reload potentially fail
4752 kvm_mmu_reload(vcpu);
4758 static int handle_vmfunc(struct kvm_vcpu *vcpu)
4760 struct vcpu_vmx *vmx = to_vmx(vcpu);
4761 struct vmcs12 *vmcs12;
4762 u32 function = vcpu->arch.regs[VCPU_REGS_RAX];
4765 * VMFUNC is only supported for nested guests, but we always enable the
4766 * secondary control for simplicity; for non-nested mode, fake that we
4767 * didn't by injecting #UD.
4769 if (!is_guest_mode(vcpu)) {
4770 kvm_queue_exception(vcpu, UD_VECTOR);
4774 vmcs12 = get_vmcs12(vcpu);
4775 if ((vmcs12->vm_function_control & (1 << function)) == 0)
4780 if (nested_vmx_eptp_switching(vcpu, vmcs12))
4786 return kvm_skip_emulated_instruction(vcpu);
4789 nested_vmx_vmexit(vcpu, vmx->exit_reason,
4790 vmcs_read32(VM_EXIT_INTR_INFO),
4791 vmcs_readl(EXIT_QUALIFICATION));
4796 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
4797 struct vmcs12 *vmcs12)
4799 unsigned long exit_qualification;
4800 gpa_t bitmap, last_bitmap;
4805 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
4806 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
4808 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4810 port = exit_qualification >> 16;
4811 size = (exit_qualification & 7) + 1;
4813 last_bitmap = (gpa_t)-1;
4818 bitmap = vmcs12->io_bitmap_a;
4819 else if (port < 0x10000)
4820 bitmap = vmcs12->io_bitmap_b;
4823 bitmap += (port & 0x7fff) / 8;
4825 if (last_bitmap != bitmap)
4826 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
4828 if (b & (1 << (port & 7)))
4833 last_bitmap = bitmap;
4840 * Return 1 if we should exit from L2 to L1 to handle an MSR access access,
4841 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
4842 * disinterest in the current event (read or write a specific MSR) by using an
4843 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
4845 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
4846 struct vmcs12 *vmcs12, u32 exit_reason)
4848 u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
4851 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
4855 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
4856 * for the four combinations of read/write and low/high MSR numbers.
4857 * First we need to figure out which of the four to use:
4859 bitmap = vmcs12->msr_bitmap;
4860 if (exit_reason == EXIT_REASON_MSR_WRITE)
4862 if (msr_index >= 0xc0000000) {
4863 msr_index -= 0xc0000000;
4867 /* Then read the msr_index'th bit from this bitmap: */
4868 if (msr_index < 1024*8) {
4870 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
4872 return 1 & (b >> (msr_index & 7));
4874 return true; /* let L1 handle the wrong parameter */
4878 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
4879 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
4880 * intercept (via guest_host_mask etc.) the current event.
4882 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
4883 struct vmcs12 *vmcs12)
4885 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4886 int cr = exit_qualification & 15;
4890 switch ((exit_qualification >> 4) & 3) {
4891 case 0: /* mov to cr */
4892 reg = (exit_qualification >> 8) & 15;
4893 val = kvm_register_readl(vcpu, reg);
4896 if (vmcs12->cr0_guest_host_mask &
4897 (val ^ vmcs12->cr0_read_shadow))
4901 if ((vmcs12->cr3_target_count >= 1 &&
4902 vmcs12->cr3_target_value0 == val) ||
4903 (vmcs12->cr3_target_count >= 2 &&
4904 vmcs12->cr3_target_value1 == val) ||
4905 (vmcs12->cr3_target_count >= 3 &&
4906 vmcs12->cr3_target_value2 == val) ||
4907 (vmcs12->cr3_target_count >= 4 &&
4908 vmcs12->cr3_target_value3 == val))
4910 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
4914 if (vmcs12->cr4_guest_host_mask &
4915 (vmcs12->cr4_read_shadow ^ val))
4919 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
4925 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
4926 (vmcs12->cr0_read_shadow & X86_CR0_TS))
4929 case 1: /* mov from cr */
4932 if (vmcs12->cpu_based_vm_exec_control &
4933 CPU_BASED_CR3_STORE_EXITING)
4937 if (vmcs12->cpu_based_vm_exec_control &
4938 CPU_BASED_CR8_STORE_EXITING)
4945 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
4946 * cr0. Other attempted changes are ignored, with no exit.
4948 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
4949 if (vmcs12->cr0_guest_host_mask & 0xe &
4950 (val ^ vmcs12->cr0_read_shadow))
4952 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
4953 !(vmcs12->cr0_read_shadow & 0x1) &&
4961 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
4962 struct vmcs12 *vmcs12, gpa_t bitmap)
4964 u32 vmx_instruction_info;
4965 unsigned long field;
4968 if (!nested_cpu_has_shadow_vmcs(vmcs12))
4971 /* Decode instruction info and find the field to access */
4972 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4973 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
4975 /* Out-of-range fields always cause a VM exit from L2 to L1 */
4979 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
4982 return 1 & (b >> (field & 7));
4986 * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
4987 * should handle it ourselves in L0 (and then continue L2). Only call this
4988 * when in is_guest_mode (L2).
4990 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
4992 u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
4993 struct vcpu_vmx *vmx = to_vmx(vcpu);
4994 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4996 if (vmx->nested.nested_run_pending)
4999 if (unlikely(vmx->fail)) {
5000 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
5001 vmcs_read32(VM_INSTRUCTION_ERROR));
5006 * The host physical addresses of some pages of guest memory
5007 * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
5008 * Page). The CPU may write to these pages via their host
5009 * physical address while L2 is running, bypassing any
5010 * address-translation-based dirty tracking (e.g. EPT write
5013 * Mark them dirty on every exit from L2 to prevent them from
5014 * getting out of sync with dirty tracking.
5016 nested_mark_vmcs12_pages_dirty(vcpu);
5018 trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
5019 vmcs_readl(EXIT_QUALIFICATION),
5020 vmx->idt_vectoring_info,
5022 vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
5025 switch (exit_reason) {
5026 case EXIT_REASON_EXCEPTION_NMI:
5027 if (is_nmi(intr_info))
5029 else if (is_page_fault(intr_info))
5030 return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
5031 else if (is_debug(intr_info) &&
5033 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
5035 else if (is_breakpoint(intr_info) &&
5036 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5038 return vmcs12->exception_bitmap &
5039 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
5040 case EXIT_REASON_EXTERNAL_INTERRUPT:
5042 case EXIT_REASON_TRIPLE_FAULT:
5044 case EXIT_REASON_PENDING_INTERRUPT:
5045 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
5046 case EXIT_REASON_NMI_WINDOW:
5047 return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
5048 case EXIT_REASON_TASK_SWITCH:
5050 case EXIT_REASON_CPUID:
5052 case EXIT_REASON_HLT:
5053 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
5054 case EXIT_REASON_INVD:
5056 case EXIT_REASON_INVLPG:
5057 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5058 case EXIT_REASON_RDPMC:
5059 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
5060 case EXIT_REASON_RDRAND:
5061 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
5062 case EXIT_REASON_RDSEED:
5063 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
5064 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
5065 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
5066 case EXIT_REASON_VMREAD:
5067 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5068 vmcs12->vmread_bitmap);
5069 case EXIT_REASON_VMWRITE:
5070 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5071 vmcs12->vmwrite_bitmap);
5072 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
5073 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
5074 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
5075 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
5076 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
5078 * VMX instructions trap unconditionally. This allows L1 to
5079 * emulate them for its L2 guest, i.e., allows 3-level nesting!
5082 case EXIT_REASON_CR_ACCESS:
5083 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
5084 case EXIT_REASON_DR_ACCESS:
5085 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
5086 case EXIT_REASON_IO_INSTRUCTION:
5087 return nested_vmx_exit_handled_io(vcpu, vmcs12);
5088 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
5089 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
5090 case EXIT_REASON_MSR_READ:
5091 case EXIT_REASON_MSR_WRITE:
5092 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
5093 case EXIT_REASON_INVALID_STATE:
5095 case EXIT_REASON_MWAIT_INSTRUCTION:
5096 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
5097 case EXIT_REASON_MONITOR_TRAP_FLAG:
5098 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
5099 case EXIT_REASON_MONITOR_INSTRUCTION:
5100 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
5101 case EXIT_REASON_PAUSE_INSTRUCTION:
5102 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
5103 nested_cpu_has2(vmcs12,
5104 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
5105 case EXIT_REASON_MCE_DURING_VMENTRY:
5107 case EXIT_REASON_TPR_BELOW_THRESHOLD:
5108 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
5109 case EXIT_REASON_APIC_ACCESS:
5110 case EXIT_REASON_APIC_WRITE:
5111 case EXIT_REASON_EOI_INDUCED:
5113 * The controls for "virtualize APIC accesses," "APIC-
5114 * register virtualization," and "virtual-interrupt
5115 * delivery" only come from vmcs12.
5118 case EXIT_REASON_EPT_VIOLATION:
5120 * L0 always deals with the EPT violation. If nested EPT is
5121 * used, and the nested mmu code discovers that the address is
5122 * missing in the guest EPT table (EPT12), the EPT violation
5123 * will be injected with nested_ept_inject_page_fault()
5126 case EXIT_REASON_EPT_MISCONFIG:
5128 * L2 never uses directly L1's EPT, but rather L0's own EPT
5129 * table (shadow on EPT) or a merged EPT table that L0 built
5130 * (EPT on EPT). So any problems with the structure of the
5131 * table is L0's fault.
5134 case EXIT_REASON_INVPCID:
5136 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
5137 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5138 case EXIT_REASON_WBINVD:
5139 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
5140 case EXIT_REASON_XSETBV:
5142 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
5144 * This should never happen, since it is not possible to
5145 * set XSS to a non-zero value---neither in L1 nor in L2.
5146 * If if it were, XSS would have to be checked against
5147 * the XSS exit bitmap in vmcs12.
5149 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
5150 case EXIT_REASON_PREEMPTION_TIMER:
5152 case EXIT_REASON_PML_FULL:
5153 /* We emulate PML support to L1. */
5155 case EXIT_REASON_VMFUNC:
5156 /* VM functions are emulated through L2->L0 vmexits. */
5158 case EXIT_REASON_ENCLS:
5159 /* SGX is never exposed to L1 */
5167 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
5168 struct kvm_nested_state __user *user_kvm_nested_state,
5171 struct vcpu_vmx *vmx;
5172 struct vmcs12 *vmcs12;
5173 struct kvm_nested_state kvm_state = {
5176 .size = sizeof(kvm_state),
5177 .vmx.vmxon_pa = -1ull,
5178 .vmx.vmcs_pa = -1ull,
5182 return kvm_state.size + 2 * VMCS12_SIZE;
5185 vmcs12 = get_vmcs12(vcpu);
5187 if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
5188 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
5190 if (nested_vmx_allowed(vcpu) &&
5191 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
5192 kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
5193 kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
5195 if (vmx_has_valid_vmcs12(vcpu)) {
5196 kvm_state.size += VMCS12_SIZE;
5198 if (is_guest_mode(vcpu) &&
5199 nested_cpu_has_shadow_vmcs(vmcs12) &&
5200 vmcs12->vmcs_link_pointer != -1ull)
5201 kvm_state.size += VMCS12_SIZE;
5204 if (vmx->nested.smm.vmxon)
5205 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
5207 if (vmx->nested.smm.guest_mode)
5208 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
5210 if (is_guest_mode(vcpu)) {
5211 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
5213 if (vmx->nested.nested_run_pending)
5214 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
5218 if (user_data_size < kvm_state.size)
5221 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
5224 if (!vmx_has_valid_vmcs12(vcpu))
5228 * When running L2, the authoritative vmcs12 state is in the
5229 * vmcs02. When running L1, the authoritative vmcs12 state is
5230 * in the shadow or enlightened vmcs linked to vmcs01, unless
5231 * need_vmcs12_sync is set, in which case, the authoritative
5232 * vmcs12 state is in the vmcs12 already.
5234 if (is_guest_mode(vcpu)) {
5235 sync_vmcs12(vcpu, vmcs12);
5236 } else if (!vmx->nested.need_vmcs12_sync) {
5237 if (vmx->nested.hv_evmcs)
5238 copy_enlightened_to_vmcs12(vmx);
5239 else if (enable_shadow_vmcs)
5240 copy_shadow_to_vmcs12(vmx);
5243 if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
5246 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5247 vmcs12->vmcs_link_pointer != -1ull) {
5248 if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
5249 get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
5254 return kvm_state.size;
5258 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
5260 void vmx_leave_nested(struct kvm_vcpu *vcpu)
5262 if (is_guest_mode(vcpu)) {
5263 to_vmx(vcpu)->nested.nested_run_pending = 0;
5264 nested_vmx_vmexit(vcpu, -1, 0, 0);
5269 static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
5270 struct kvm_nested_state __user *user_kvm_nested_state,
5271 struct kvm_nested_state *kvm_state)
5273 struct vcpu_vmx *vmx = to_vmx(vcpu);
5274 struct vmcs12 *vmcs12;
5278 if (kvm_state->format != 0)
5281 if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
5282 nested_enable_evmcs(vcpu, NULL);
5284 if (!nested_vmx_allowed(vcpu))
5285 return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
5287 if (kvm_state->vmx.vmxon_pa == -1ull) {
5288 if (kvm_state->vmx.smm.flags)
5291 if (kvm_state->vmx.vmcs_pa != -1ull)
5294 vmx_leave_nested(vcpu);
5298 if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
5301 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5302 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5305 if (kvm_state->vmx.smm.flags &
5306 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
5310 * SMM temporarily disables VMX, so we cannot be in guest mode,
5311 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
5314 if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
5317 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
5318 !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
5321 vmx_leave_nested(vcpu);
5322 if (kvm_state->vmx.vmxon_pa == -1ull)
5325 vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
5326 ret = enter_vmx_operation(vcpu);
5330 /* Empty 'VMXON' state is permitted */
5331 if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
5334 if (kvm_state->vmx.vmcs_pa != -1ull) {
5335 if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
5336 !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
5339 set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
5340 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
5342 * Sync eVMCS upon entry as we may not have
5343 * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
5345 vmx->nested.need_vmcs12_sync = true;
5350 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
5351 vmx->nested.smm.vmxon = true;
5352 vmx->nested.vmxon = false;
5354 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
5355 vmx->nested.smm.guest_mode = true;
5358 vmcs12 = get_vmcs12(vcpu);
5359 if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
5362 if (vmcs12->hdr.revision_id != VMCS12_REVISION)
5365 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
5368 vmx->nested.nested_run_pending =
5369 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
5371 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
5372 vmcs12->vmcs_link_pointer != -1ull) {
5373 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
5375 if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12))
5378 if (copy_from_user(shadow_vmcs12,
5379 user_kvm_nested_state->data + VMCS12_SIZE,
5383 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
5384 !shadow_vmcs12->hdr.shadow_vmcs)
5388 if (nested_vmx_check_vmentry_prereqs(vcpu, vmcs12) ||
5389 nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
5392 vmx->nested.dirty_vmcs12 = true;
5393 ret = nested_vmx_enter_non_root_mode(vcpu, false);
5400 void nested_vmx_vcpu_setup(void)
5402 if (enable_shadow_vmcs) {
5404 * At vCPU creation, "VMWRITE to any supported field
5405 * in the VMCS" is supported, so use the more
5406 * permissive vmx_vmread_bitmap to specify both read
5407 * and write permissions for the shadow VMCS.
5409 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
5410 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap));
5415 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
5416 * returned for the various VMX controls MSRs when nested VMX is enabled.
5417 * The same values should also be used to verify that vmcs12 control fields are
5418 * valid during nested entry from L1 to L2.
5419 * Each of these control msrs has a low and high 32-bit half: A low bit is on
5420 * if the corresponding bit in the (32-bit) control field *must* be on, and a
5421 * bit in the high half is on if the corresponding bit in the control field
5422 * may be on. See also vmx_control_verify().
5424 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
5428 * Note that as a general rule, the high half of the MSRs (bits in
5429 * the control fields which may be 1) should be initialized by the
5430 * intersection of the underlying hardware's MSR (i.e., features which
5431 * can be supported) and the list of features we want to expose -
5432 * because they are known to be properly supported in our code.
5433 * Also, usually, the low half of the MSRs (bits which must be 1) can
5434 * be set to 0, meaning that L1 may turn off any of these bits. The
5435 * reason is that if one of these bits is necessary, it will appear
5436 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
5437 * fields of vmcs01 and vmcs02, will turn these bits off - and
5438 * nested_vmx_exit_reflected() will not pass related exits to L1.
5439 * These rules have exceptions below.
5442 /* pin-based controls */
5443 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
5444 msrs->pinbased_ctls_low,
5445 msrs->pinbased_ctls_high);
5446 msrs->pinbased_ctls_low |=
5447 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
5448 msrs->pinbased_ctls_high &=
5449 PIN_BASED_EXT_INTR_MASK |
5450 PIN_BASED_NMI_EXITING |
5451 PIN_BASED_VIRTUAL_NMIS |
5452 (apicv ? PIN_BASED_POSTED_INTR : 0);
5453 msrs->pinbased_ctls_high |=
5454 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
5455 PIN_BASED_VMX_PREEMPTION_TIMER;
5458 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
5459 msrs->exit_ctls_low,
5460 msrs->exit_ctls_high);
5461 msrs->exit_ctls_low =
5462 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
5464 msrs->exit_ctls_high &=
5465 #ifdef CONFIG_X86_64
5466 VM_EXIT_HOST_ADDR_SPACE_SIZE |
5468 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
5469 msrs->exit_ctls_high |=
5470 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
5471 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
5472 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
5474 /* We support free control of debug control saving. */
5475 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
5477 /* entry controls */
5478 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
5479 msrs->entry_ctls_low,
5480 msrs->entry_ctls_high);
5481 msrs->entry_ctls_low =
5482 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
5483 msrs->entry_ctls_high &=
5484 #ifdef CONFIG_X86_64
5485 VM_ENTRY_IA32E_MODE |
5487 VM_ENTRY_LOAD_IA32_PAT;
5488 msrs->entry_ctls_high |=
5489 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
5491 /* We support free control of debug control loading. */
5492 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
5494 /* cpu-based controls */
5495 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
5496 msrs->procbased_ctls_low,
5497 msrs->procbased_ctls_high);
5498 msrs->procbased_ctls_low =
5499 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
5500 msrs->procbased_ctls_high &=
5501 CPU_BASED_VIRTUAL_INTR_PENDING |
5502 CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
5503 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
5504 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
5505 CPU_BASED_CR3_STORE_EXITING |
5506 #ifdef CONFIG_X86_64
5507 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
5509 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
5510 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
5511 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
5512 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
5513 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
5515 * We can allow some features even when not supported by the
5516 * hardware. For example, L1 can specify an MSR bitmap - and we
5517 * can use it to avoid exits to L1 - even when L0 runs L2
5518 * without MSR bitmaps.
5520 msrs->procbased_ctls_high |=
5521 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
5522 CPU_BASED_USE_MSR_BITMAPS;
5524 /* We support free control of CR3 access interception. */
5525 msrs->procbased_ctls_low &=
5526 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
5529 * secondary cpu-based controls. Do not include those that
5530 * depend on CPUID bits, they are added later by vmx_cpuid_update.
5532 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
5533 msrs->secondary_ctls_low,
5534 msrs->secondary_ctls_high);
5535 msrs->secondary_ctls_low = 0;
5536 msrs->secondary_ctls_high &=
5537 SECONDARY_EXEC_DESC |
5538 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
5539 SECONDARY_EXEC_APIC_REGISTER_VIRT |
5540 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
5541 SECONDARY_EXEC_WBINVD_EXITING;
5544 * We can emulate "VMCS shadowing," even if the hardware
5545 * doesn't support it.
5547 msrs->secondary_ctls_high |=
5548 SECONDARY_EXEC_SHADOW_VMCS;
5551 /* nested EPT: emulate EPT also to L1 */
5552 msrs->secondary_ctls_high |=
5553 SECONDARY_EXEC_ENABLE_EPT;
5554 msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
5555 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
5556 if (cpu_has_vmx_ept_execute_only())
5558 VMX_EPT_EXECUTE_ONLY_BIT;
5559 msrs->ept_caps &= ept_caps;
5560 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
5561 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
5562 VMX_EPT_1GB_PAGE_BIT;
5563 if (enable_ept_ad_bits) {
5564 msrs->secondary_ctls_high |=
5565 SECONDARY_EXEC_ENABLE_PML;
5566 msrs->ept_caps |= VMX_EPT_AD_BIT;
5570 if (cpu_has_vmx_vmfunc()) {
5571 msrs->secondary_ctls_high |=
5572 SECONDARY_EXEC_ENABLE_VMFUNC;
5574 * Advertise EPTP switching unconditionally
5575 * since we emulate it
5578 msrs->vmfunc_controls =
5579 VMX_VMFUNC_EPTP_SWITCHING;
5583 * Old versions of KVM use the single-context version without
5584 * checking for support, so declare that it is supported even
5585 * though it is treated as global context. The alternative is
5586 * not failing the single-context invvpid, and it is worse.
5589 msrs->secondary_ctls_high |=
5590 SECONDARY_EXEC_ENABLE_VPID;
5591 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
5592 VMX_VPID_EXTENT_SUPPORTED_MASK;
5595 if (enable_unrestricted_guest)
5596 msrs->secondary_ctls_high |=
5597 SECONDARY_EXEC_UNRESTRICTED_GUEST;
5599 if (flexpriority_enabled)
5600 msrs->secondary_ctls_high |=
5601 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
5603 /* miscellaneous data */
5604 rdmsr(MSR_IA32_VMX_MISC,
5607 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
5609 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
5610 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
5611 VMX_MISC_ACTIVITY_HLT;
5612 msrs->misc_high = 0;
5615 * This MSR reports some information about VMX support. We
5616 * should return information about the VMX we emulate for the
5617 * guest, and the VMCS structure we give it - not about the
5618 * VMX support of the underlying hardware.
5622 VMX_BASIC_TRUE_CTLS |
5623 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
5624 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
5626 if (cpu_has_vmx_basic_inout())
5627 msrs->basic |= VMX_BASIC_INOUT;
5630 * These MSRs specify bits which the guest must keep fixed on
5631 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
5632 * We picked the standard core2 setting.
5634 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
5635 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE
5636 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
5637 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
5639 /* These MSRs specify bits which the guest must keep fixed off. */
5640 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
5641 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
5643 /* highest index: VMX_PREEMPTION_TIMER_VALUE */
5644 msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
5647 void nested_vmx_hardware_unsetup(void)
5651 if (enable_shadow_vmcs) {
5652 for (i = 0; i < VMX_BITMAP_NR; i++)
5653 free_page((unsigned long)vmx_bitmap[i]);
5657 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
5661 if (!cpu_has_vmx_shadow_vmcs())
5662 enable_shadow_vmcs = 0;
5663 if (enable_shadow_vmcs) {
5664 for (i = 0; i < VMX_BITMAP_NR; i++) {
5665 vmx_bitmap[i] = (unsigned long *)
5666 __get_free_page(GFP_KERNEL);
5667 if (!vmx_bitmap[i]) {
5668 nested_vmx_hardware_unsetup();
5673 init_vmcs_shadow_fields();
5676 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear,
5677 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
5678 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld,
5679 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst,
5680 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread,
5681 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume,
5682 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite,
5683 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff,
5684 exit_handlers[EXIT_REASON_VMON] = handle_vmon,
5685 exit_handlers[EXIT_REASON_INVEPT] = handle_invept,
5686 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid,
5687 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc,
5689 kvm_x86_ops->check_nested_events = vmx_check_nested_events;
5690 kvm_x86_ops->get_nested_state = vmx_get_nested_state;
5691 kvm_x86_ops->set_nested_state = vmx_set_nested_state;
5692 kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages,
5693 kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
5694 kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;