Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
[sfrench/cifs-2.6.git] / arch / x86 / kvm / lapic.c
index 86c33d53c90a0527b7adbd2afb1aa99877c58eb7..3136e05831cf35f9ac852c5b3dc6d2bd8dac4a4c 100644 (file)
@@ -2843,14 +2843,35 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
        u8 sipi_vector;
+       int r;
        unsigned long pe;
 
-       if (!lapic_in_kernel(vcpu) || !apic->pending_events)
+       if (!lapic_in_kernel(vcpu))
                return;
 
+       /*
+        * Read pending events with acquire semantics before the
+        * check_events callback so a consistent snapshot is used.
+        */
+       pe = smp_load_acquire(&apic->pending_events);
+       if (!pe)
+               return;
+
+       if (is_guest_mode(vcpu)) {
+               r = kvm_x86_ops.nested_ops->check_events(vcpu);
+               if (r < 0)
+                       return;
+               /*
+                * If an event has happened and caused a vmexit,
+                * we know INITs are latched and therefore
+                * we will not incorrectly deliver an APIC
+                * event instead of a vmexit.
+                */
+       }
+
        /*
         * INITs are latched while CPU is in specific states
-        * (SMM, VMX non-root mode, SVM with GIF=0).
+        * (SMM, VMX root mode, SVM with GIF=0).
         * Because a CPU cannot be in these states immediately
         * after it has processed an INIT signal (and thus in
         * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
@@ -2858,26 +2879,28 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
         */
        if (kvm_vcpu_latch_init(vcpu)) {
                WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
-               if (test_bit(KVM_APIC_SIPI, &apic->pending_events))
+               if (test_bit(KVM_APIC_SIPI, &pe))
                        clear_bit(KVM_APIC_SIPI, &apic->pending_events);
                return;
        }
 
-       pe = xchg(&apic->pending_events, 0);
        if (test_bit(KVM_APIC_INIT, &pe)) {
+               clear_bit(KVM_APIC_INIT, &apic->pending_events);
                kvm_vcpu_reset(vcpu, true);
                if (kvm_vcpu_is_bsp(apic->vcpu))
                        vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                else
                        vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
        }
-       if (test_bit(KVM_APIC_SIPI, &pe) &&
-           vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
-               /* evaluate pending_events before reading the vector */
-               smp_rmb();
-               sipi_vector = apic->sipi_vector;
-               kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
-               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+       if (test_bit(KVM_APIC_SIPI, &pe)) {
+               clear_bit(KVM_APIC_SIPI, &apic->pending_events);
+               if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
+                       /* evaluate pending_events before reading the vector */
+                       smp_rmb();
+                       sipi_vector = apic->sipi_vector;
+                       kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
+                       vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+               }
        }
 }