Merge branch 'for-4.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
[sfrench/cifs-2.6.git] / arch / arm64 / kvm / handle_exit.c
index e60494f1eef9d5970e1ee5bda1daef2c58321670..520b0dad3c62bf671d084411ec5b707e93f5d271 100644 (file)
 #include <linux/kvm_host.h>
 
 #include <asm/esr.h>
+#include <asm/exception.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_psci.h>
 #include <asm/debug-monitors.h>
+#include <asm/traps.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
 typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
 
+/*
+ * Decide whether a guest-originated SError must be made visible to the
+ * guest as a virtual abort.
+ *
+ * Inject a vabt when the ESR does not describe a RAS SError, or when it
+ * describes a fatal one.  Non-fatal RAS SErrors are deliberately not
+ * forwarded — NOTE(review): presumably they have already been consumed
+ * by the host's RAS handling; confirm against the arm64_is_ras_serror()
+ * / arm64_is_fatal_ras_serror() definitions in <asm/traps.h>.
+ */
+static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
+{
+       if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
+               kvm_inject_vabt(vcpu);
+}
+
 static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        int ret;
@@ -242,7 +250,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        *vcpu_pc(vcpu) -= adj;
                }
 
-               kvm_inject_vabt(vcpu);
                return 1;
        }
 
@@ -252,7 +259,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
        case ARM_EXCEPTION_IRQ:
                return 1;
        case ARM_EXCEPTION_EL1_SERROR:
-               kvm_inject_vabt(vcpu);
                /* We may still need to return for single-step */
                if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
                        && kvm_arm_handle_step_debug(vcpu, run))
@@ -275,3 +281,25 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                return 0;
        }
 }
+
+/* For exit types that need handling before we can be preempted */
+void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
+                      int exception_index)
+{
+       /*
+        * A pending SError can piggy-back on any exit reason, so check
+        * for it first, regardless of the exception class.
+        */
+       if (ARM_SERROR_PENDING(exception_index)) {
+               if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
+                       /*
+                        * With the RAS extension the SError syndrome was
+                        * deferred into DISR; convert it to ESR form and
+                        * let the RAS-aware filter decide whether the
+                        * guest must see a virtual abort.
+                        */
+                       u64 disr = kvm_vcpu_get_disr(vcpu);
+
+                       kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
+               } else {
+                       /* No RAS: all we can do is inject a vabt. */
+                       kvm_inject_vabt(vcpu);
+               }
+
+               return;
+       }
+
+       /* Strip the pending-SError flag, leaving the exception class. */
+       exception_index = ARM_EXCEPTION_CODE(exception_index);
+
+       if (exception_index == ARM_EXCEPTION_EL1_SERROR)
+               kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
+}