KVM: SVM: merge update_cr0_intercept into svm_set_cr0
authorPaolo Bonzini <pbonzini@redhat.com>
Thu, 18 Feb 2021 14:51:06 +0000 (09:51 -0500)
committerPaolo Bonzini <pbonzini@redhat.com>
Mon, 15 Mar 2021 08:42:38 +0000 (04:42 -0400)
The logic of update_cr0_intercept is pointlessly complicated.
All svm_set_cr0 needs to do is compute the effective cr0 and compare
it with the guest value.

Inlining the function and simplifying the condition
clarifies what it is doing.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
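
For readers skimming the patch, the following standalone sketch condenses the
decision the merged code now makes. It is illustration only: the bit values
mirror X86_CR0_PG/WP/CD/NW, while cr0_needs_intercept() and the main() harness
are invented for this sketch and stand in for the svm_set_intercept()/
svm_clr_intercept() calls in the real svm_set_cr0() shown in the diff below.

/*
 * Standalone illustration of the merged svm_set_cr0() intercept logic.
 * Only the bit masks and the hcr0 == cr0 comparison mirror the patch;
 * the helper and harness are scaffolding for this example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CR0_PG (1UL << 31)   /* X86_CR0_PG */
#define CR0_WP (1UL << 16)   /* X86_CR0_WP */
#define CR0_CD (1UL << 30)   /* X86_CR0_CD */
#define CR0_NW (1UL << 29)   /* X86_CR0_NW */

/*
 * Compute the CR0 value the guest actually runs with (hcr0) and report
 * whether CR0 read/write intercepts are still needed.
 */
static bool cr0_needs_intercept(uint64_t cr0, bool npt_enabled,
				bool cd_nw_quirk)
{
	uint64_t hcr0 = cr0;

	if (!npt_enabled)
		hcr0 |= CR0_PG | CR0_WP;      /* shadow paging forces PG/WP */

	if (cd_nw_quirk)
		hcr0 &= ~(CR0_CD | CR0_NW);   /* KVM_X86_QUIRK_CD_NW_CLEARED */

	/*
	 * If the effective CR0 equals the guest's CR0, the guest may read
	 * and write CR0 directly (selective CR0 write stays intercepted).
	 * Otherwise KVM must intercept to hide the host-forced bits.
	 */
	return hcr0 != cr0;
}

int main(void)
{
	/* Guest sets PG/WP itself and NPT is on: no intercept needed. */
	printf("%d\n", cr0_needs_intercept(CR0_PG | CR0_WP, true, false));
	/* Shadow paging forces PG/WP behind the guest's back: intercept. */
	printf("%d\n", cr0_needs_intercept(0, false, false));
	return 0;
}

This is exactly the "compute the effective cr0 and compare it with the guest
value" comparison from the commit message; the SEV-ES early return in the
patch is omitted here because those guests never toggle the CR intercepts.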
arch/x86/kvm/svm/svm.c

index a3c24b46610f9ec1c60a8cc3d97c5832525a3892..a45f086a941dbdf9fd956411b9abf7e53ad1da2d 100644 (file)
@@ -1720,37 +1720,10 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
        vmcb_mark_dirty(svm->vmcb, VMCB_DT);
 }
 
-static void update_cr0_intercept(struct vcpu_svm *svm)
-{
-       ulong gcr0;
-       u64 *hcr0;
-
-       /*
-        * SEV-ES guests must always keep the CR intercepts cleared. CR
-        * tracking is done using the CR write traps.
-        */
-       if (sev_es_guest(svm->vcpu.kvm))
-               return;
-
-       gcr0 = svm->vcpu.arch.cr0;
-       hcr0 = &svm->vmcb->save.cr0;
-       *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
-               | (gcr0 & SVM_CR0_SELECTIVE_MASK);
-
-       vmcb_mark_dirty(svm->vmcb, VMCB_CR);
-
-       if (gcr0 == *hcr0) {
-               svm_clr_intercept(svm, INTERCEPT_CR0_READ);
-               svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
-       } else {
-               svm_set_intercept(svm, INTERCEPT_CR0_READ);
-               svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
-       }
-}
-
 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
+       u64 hcr0 = cr0;
 
 #ifdef CONFIG_X86_64
        if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
@@ -1768,7 +1741,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        vcpu->arch.cr0 = cr0;
 
        if (!npt_enabled)
-               cr0 |= X86_CR0_PG | X86_CR0_WP;
+               hcr0 |= X86_CR0_PG | X86_CR0_WP;
 
        /*
         * re-enable caching here because the QEMU bios
@@ -1776,10 +1749,27 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         * reboot
         */
        if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
-               cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
-       svm->vmcb->save.cr0 = cr0;
+               hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);
+
+       svm->vmcb->save.cr0 = hcr0;
        vmcb_mark_dirty(svm->vmcb, VMCB_CR);
-       update_cr0_intercept(svm);
+
+       /*
+        * SEV-ES guests must always keep the CR intercepts cleared. CR
+        * tracking is done using the CR write traps.
+        */
+       if (sev_es_guest(svm->vcpu.kvm))
+               return;
+
+       if (hcr0 == cr0) {
+               /* Selective CR0 write remains on.  */
+               svm_clr_intercept(svm, INTERCEPT_CR0_READ);
+               svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
+       } else {
+               svm_set_intercept(svm, INTERCEPT_CR0_READ);
+               svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
+       }
+
 }
 
 static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)