Merge tag 'kvm-arm-for-v4.16' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm...
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index cd7d90c9f644d18743020672afbbc7ae06dcbbb9..92b95ae9a2ca04b5a474201e0908d265973b1959 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -71,17 +71,17 @@ static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
 
 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 {
-       BUG_ON(preemptible());
        __this_cpu_write(kvm_arm_running_vcpu, vcpu);
 }
 
+DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+
 /**
  * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
  * Must be called from non-preemptible context
  */
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
 {
-       BUG_ON(preemptible());
        return __this_cpu_read(kvm_arm_running_vcpu);
 }
 
@@ -295,6 +295,9 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
+       if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
+               static_branch_dec(&userspace_irqchip_in_use);
+
        kvm_mmu_free_memory_caches(vcpu);
        kvm_timer_vcpu_terminate(vcpu);
        kvm_pmu_vcpu_destroy(vcpu);
@@ -532,14 +535,22 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 
        vcpu->arch.has_run_once = true;
 
-       /*
-        * Map the VGIC hardware resources before running a vcpu the first
-        * time on this VM.
-        */
-       if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
-               ret = kvm_vgic_map_resources(kvm);
-               if (ret)
-                       return ret;
+       if (likely(irqchip_in_kernel(kvm))) {
+               /*
+                * Map the VGIC hardware resources before running a vcpu the
+                * first time on this VM.
+                */
+               if (unlikely(!vgic_ready(kvm))) {
+                       ret = kvm_vgic_map_resources(kvm);
+                       if (ret)
+                               return ret;
+               }
+       } else {
+               /*
+                * Tell the rest of the code that there are userspace irqchip
+                * VMs in the wild.
+                */
+               static_branch_inc(&userspace_irqchip_in_use);
        }
 
        ret = kvm_timer_enable(vcpu);
@@ -680,18 +691,29 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                kvm_vgic_flush_hwstate(vcpu);
 
                /*
-                * If we have a singal pending, or need to notify a userspace
-                * irqchip about timer or PMU level changes, then we exit (and
-                * update the timer level state in kvm_timer_update_run
-                * below).
+                * Exit if we have a signal pending so that we can deliver the
+                * signal to user space.
                 */
-               if (signal_pending(current) ||
-                   kvm_timer_should_notify_user(vcpu) ||
-                   kvm_pmu_should_notify_user(vcpu)) {
+               if (signal_pending(current)) {
                        ret = -EINTR;
                        run->exit_reason = KVM_EXIT_INTR;
                }
 
+               /*
+                * If we're using a userspace irqchip, then check if we need
+                * to tell a userspace irqchip about timer or PMU level
+                * changes and if so, exit to userspace (the actual level
+                * state gets updated in kvm_timer_update_run and
+                * kvm_pmu_update_run below).
+                */
+               if (static_branch_unlikely(&userspace_irqchip_in_use)) {
+                       if (kvm_timer_should_notify_user(vcpu) ||
+                           kvm_pmu_should_notify_user(vcpu)) {
+                               ret = -EINTR;
+                               run->exit_reason = KVM_EXIT_INTR;
+                       }
+               }
+
                /*
                 * Ensure we set mode to IN_GUEST_MODE after we disable
                 * interrupts and before the final VCPU requests check.
@@ -704,7 +726,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                    kvm_request_pending(vcpu)) {
                        vcpu->mode = OUTSIDE_GUEST_MODE;
                        kvm_pmu_sync_hwstate(vcpu);
-                       kvm_timer_sync_hwstate(vcpu);
+                       if (static_branch_unlikely(&userspace_irqchip_in_use))
+                               kvm_timer_sync_hwstate(vcpu);
                        kvm_vgic_sync_hwstate(vcpu);
                        local_irq_enable();
                        preempt_enable();
@@ -748,7 +771,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 * we don't want vtimer interrupts to race with syncing the
                 * timer virtual interrupt state.
                 */
-               kvm_timer_sync_hwstate(vcpu);
+               if (static_branch_unlikely(&userspace_irqchip_in_use))
+                       kvm_timer_sync_hwstate(vcpu);
 
                /*
                 * We may have taken a host interrupt in HYP mode (ie
@@ -1277,6 +1301,7 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
                        cpu_hyp_reset();
 
                return NOTIFY_OK;
+       case CPU_PM_ENTER_FAILED:
        case CPU_PM_EXIT:
                if (__this_cpu_read(kvm_arm_hardware_enabled))
                        /* The hardware was enabled before suspend. */
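For context on the mechanism the hunks above lean on: userspace_irqchip_in_use is a jump-label static key, so the new checks in the vcpu run loop cost a patched nop unless at least one VM with a userspace irqchip exists. Below is a minimal sketch of that pattern, assuming a kernel build with <linux/jump_label.h>; every example_* name is invented for illustration and is not taken from the diff.

/*
 * Illustrative sketch only: the example_* identifiers are made up; only the
 * jump-label API itself is the kernel's.  The diff above applies the same
 * pattern with the key userspace_irqchip_in_use.
 */
#include <linux/jump_label.h>

/* Key starts false: the hot-path branch is compiled out by default. */
DEFINE_STATIC_KEY_FALSE(example_slow_path_needed);

/* Call once per object that needs the slow path; may sleep (patches code),
 * cf. static_branch_inc() in kvm_vcpu_first_run_init() above. */
static void example_enable_slow_path(void)
{
	static_branch_inc(&example_slow_path_needed);
}

/* Balanced decrement on teardown, cf. kvm_arch_vcpu_free() above. */
static void example_disable_slow_path(void)
{
	static_branch_dec(&example_slow_path_needed);
}

static void example_hot_path(void)
{
	/*
	 * While the key's count is zero this is straight-line code with a
	 * nop; once any caller has incremented it, the branch is patched in
	 * and the slow path runs.
	 */
	if (static_branch_unlikely(&example_slow_path_needed)) {
		/* slow path, e.g. syncing timer/PMU state for a userspace irqchip */
	}
}

The inc/dec pair must stay balanced across object lifetime, which is why the diff only decrements in kvm_arch_vcpu_free() when the vcpu had actually run once without an in-kernel irqchip.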