Merge branch 'kvm-fix-svm-races' into kvm-master
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 35891d9a1099b3f54d146e0abb26b89c74075b2e..fb204eaa8bb3924063a2f78e63bc98ffc450bb7f 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -246,11 +246,18 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
        return true;
 }
 
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
        struct kvm_vcpu *vcpu = &svm->vcpu;
        bool vmcb12_lma;
 
+       /*
+        * FIXME: these should be done after copying the fields, to
+        * avoid TOC/TOU (time-of-check/time-of-use) races.  For these
+        * save area checks the possible damage is limited, since
+        * kvm_set_cr0 and kvm_set_cr4 handle failure; EFER_SVME is an
+        * exception, so it is force-set later in nested_prepare_vmcb_save.
+        */
        if ((vmcb12->save.efer & EFER_SVME) == 0)
                return false;
 
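
The FIXME above names the hazard precisely: vmcb12 lives in guest-writable memory, so a field can change between the moment it is checked and the moment it is consumed (a time-of-check/time-of-use race). The cure, applied to the control area by the later hunks, is to snapshot the data into KVM-private storage first and validate the snapshot. A minimal standalone sketch of the two patterns, with simplified stand-in types rather than the real KVM structures:

        #include <stdint.h>

        #define EFER_SVME (1ULL << 12)          /* EFER.SVME is bit 12 */

        struct save_area { uint64_t efer; };    /* stand-in for vmcb12->save */

        /* Racy: one read at time-of-check, a second read at time-of-use. */
        static uint64_t enter_racy(volatile struct save_area *guest)
        {
                if (!(guest->efer & EFER_SVME)) /* time of check */
                        return 0;
                /* another vCPU can clear EFER_SVME right here */
                return guest->efer;             /* time of use: may differ */
        }

        /* Safe: snapshot once, then check and use the private copy. */
        static uint64_t enter_safe(volatile struct save_area *guest)
        {
                uint64_t efer = guest->efer;    /* single read */

                if (!(efer & EFER_SVME))
                        return 0;
                return efer;
        }
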
@@ -271,7 +278,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
        if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
                return false;
 
-       return nested_vmcb_check_controls(&vmcb12->control);
+       return true;
 }
 
 static void load_nested_vmcb_control(struct vcpu_svm *svm,
@@ -396,7 +403,14 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
        svm->vmcb->save.gdtr = vmcb12->save.gdtr;
        svm->vmcb->save.idtr = vmcb12->save.idtr;
        kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
-       svm_set_efer(&svm->vcpu, vmcb12->save.efer);
+
+       /*
+        * Force-set EFER_SVME even though it is checked earlier on the
+        * VMCB12, because the guest can flip the bit between the check
+        * and now.  Clearing EFER_SVME would call svm_free_nested.
+        */
+       svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
+
        svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
        svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
        svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
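
Why the bit is ORed in rather than trusted: if the guest flips EFER.SVME off in vmcb12 after nested_vmcb_check_save() has passed, handing the raw value to svm_set_efer() would reach the teardown branch the comment mentions and free nested state that enter_svm_guest_mode() is still using. Heavily condensed, and not verbatim kernel code, that branch looks like:

        /* inside svm_set_efer(): clearing SVME tears down nested state */
        if (!(efer & EFER_SVME))
                svm_free_nested(svm);   /* would free vmcb02 et al. mid-VMRUN */

Forcing EFER_SVME keeps the value consistent with the check that already passed, so this path can never reach the teardown branch.
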
@@ -468,7 +482,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
 
 
        svm->nested.vmcb12_gpa = vmcb12_gpa;
-       load_nested_vmcb_control(svm, &vmcb12->control);
        nested_prepare_vmcb_control(svm);
        nested_prepare_vmcb_save(svm, vmcb12);
 
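
Note that load_nested_vmcb_control() is not dropped here; it moves up into nested_svm_vmrun() (next hunk), so svm->nested.ctl already holds the snapshot by the time the checks and nested_prepare_vmcb_control() run. The resulting call order, condensed to the calls this patch touches:

        nested_svm_vmrun()
        {
                load_nested_vmcb_control(svm, &vmcb12->control); /* snapshot */
                /* check vmcb12->save and the nested.ctl snapshot */
                enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12);
                /* -> nested_prepare_vmcb_control() consumes nested.ctl */
                /* -> nested_prepare_vmcb_save() consumes vmcb12->save  */
        }
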
@@ -515,7 +528,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
        if (WARN_ON_ONCE(!svm->nested.initialized))
                return -EINVAL;
 
-       if (!nested_vmcb_checks(svm, vmcb12)) {
+       load_nested_vmcb_control(svm, &vmcb12->control);
+
+       if (!nested_vmcb_check_save(svm, vmcb12) ||
+           !nested_vmcb_check_controls(&svm->nested.ctl)) {
                vmcb12->control.exit_code    = SVM_EXIT_ERR;
                vmcb12->control.exit_code_hi = 0;
                vmcb12->control.exit_info_1  = 0;
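
The net effect on the control area: validation no longer dereferences guest memory at all. Previously nested_vmcb_checks() inspected vmcb12->control in place and the snapshot was taken afterwards; now the snapshot is taken first and the check runs on KVM's private copy, so nothing the guest writes to vmcb12 past this point can influence the decision. Side by side:

        /* Old: validate guest-writable memory, snapshot afterwards (racy). */
        if (!nested_vmcb_check_controls(&vmcb12->control))
                /* fail VMRUN */;
        load_nested_vmcb_control(svm, &vmcb12->control);

        /* New: snapshot first, then validate the KVM-private copy. */
        load_nested_vmcb_control(svm, &vmcb12->control);
        if (!nested_vmcb_check_controls(&svm->nested.ctl))
                /* reflect SVM_EXIT_ERR into vmcb12, as shown above */;
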
@@ -1209,6 +1225,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
         */
        if (!(save->cr0 & X86_CR0_PG))
                goto out_free;
+       if (!(save->efer & EFER_SVME))
+               goto out_free;
 
        /*
         * All checks done, we can enter guest mode.  L1 control fields