Merge branch 'kvm-arm64/hyp-pcpu' into kvmarm-master/next
author		Marc Zyngier <maz@kernel.org>
		Wed, 30 Sep 2020 13:05:35 +0000 (14:05 +0100)
committer	Marc Zyngier <maz@kernel.org>
		Wed, 30 Sep 2020 13:05:35 +0000 (14:05 +0100)
Signed-off-by: Marc Zyngier <maz@kernel.org>
14 files changed:
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/uapi/asm/kvm.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/Makefile
arch/arm64/kvm/hyp/entry.S
arch/arm64/kvm/hyp/hyp-entry.S
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/Makefile
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/sys_regs.c

index 3e4577013d33c333a376ae281ae7bac1555d5a05,3438e85e1df6df80f7ad9c7a14ea14e7680063cb..54387ccd1ab26ad11822e5bb85ad8f852e16e85d
        DECLARE_KVM_VHE_SYM(sym);               \
        DECLARE_KVM_NVHE_SYM(sym)
  
 -#define CHOOSE_VHE_SYM(sym)   sym
 -#define CHOOSE_NVHE_SYM(sym)  kvm_nvhe_sym(sym)
 -
+ #define DECLARE_KVM_VHE_PER_CPU(type, sym)    \
+       DECLARE_PER_CPU(type, sym)
+ #define DECLARE_KVM_NVHE_PER_CPU(type, sym)   \
+       DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))
+ #define DECLARE_KVM_HYP_PER_CPU(type, sym)    \
+       DECLARE_KVM_VHE_PER_CPU(type, sym);     \
+       DECLARE_KVM_NVHE_PER_CPU(type, sym)
 -#ifndef __KVM_NVHE_HYPERVISOR__
+ /*
+  * Compute pointer to a symbol defined in nVHE percpu region.
+  * Returns NULL if percpu memory has not been allocated yet.
+  */
+ #define this_cpu_ptr_nvhe_sym(sym)    per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
+ #define per_cpu_ptr_nvhe_sym(sym, cpu)                                                \
+       ({                                                                      \
+               unsigned long base, off;                                        \
+               base = kvm_arm_hyp_percpu_base[cpu];                            \
+               off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -                    \
+                     (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);         \
+               base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;      \
+       })
- #define CHOOSE_HYP_SYM(sym)   CHOOSE_NVHE_SYM(sym)
 +#if defined(__KVM_NVHE_HYPERVISOR__)
 +
- #define CHOOSE_VHE_SYM(sym)   __nvhe_undefined_symbol
 +#define CHOOSE_NVHE_SYM(sym)  sym
++#define CHOOSE_HYP_SYM(sym)   CHOOSE_NVHE_SYM(sym)
++
 +/* The nVHE hypervisor shouldn't even try to access VHE symbols */
 +extern void *__nvhe_undefined_symbol;
- #elif defined(__KVM_VHE_HYPERVISOR)
++#define CHOOSE_VHE_SYM(sym)           __nvhe_undefined_symbol
++#define this_cpu_ptr_hyp_sym(sym)     (&__nvhe_undefined_symbol)
++#define per_cpu_ptr_hyp_sym(sym, cpu) (&__nvhe_undefined_symbol)
 +
- #define CHOOSE_HYP_SYM(sym)   CHOOSE_VHE_SYM(sym)
++#elif defined(__KVM_VHE_HYPERVISOR__)
 +
- #define CHOOSE_NVHE_SYM(sym)  __vhe_undefined_symbol
 +#define CHOOSE_VHE_SYM(sym)   sym
++#define CHOOSE_HYP_SYM(sym)   CHOOSE_VHE_SYM(sym)
++
 +/* The VHE hypervisor shouldn't even try to access nVHE symbols */
 +extern void *__vhe_undefined_symbol;
++#define CHOOSE_NVHE_SYM(sym)          __vhe_undefined_symbol
++#define this_cpu_ptr_hyp_sym(sym)     (&__vhe_undefined_symbol)
++#define per_cpu_ptr_hyp_sym(sym, cpu) (&__vhe_undefined_symbol)
 +
 +#else
 +
  /*
   * BIG FAT WARNINGS:
   *
   * - Don't let the nVHE hypervisor have access to this, as it will
   *   pick the *wrong* symbol (yes, it runs at EL2...).
   */
- #define CHOOSE_HYP_SYM(sym)   (is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
+ #define CHOOSE_HYP_SYM(sym)           (is_kernel_in_hyp_mode()        \
+                                          ? CHOOSE_VHE_SYM(sym)        \
                                           : CHOOSE_NVHE_SYM(sym))
 -#else
 -/* The nVHE hypervisor shouldn't even try to access anything */
 -extern void *__nvhe_undefined_symbol;
 -#define CHOOSE_HYP_SYM(sym)           __nvhe_undefined_symbol
 -#define this_cpu_ptr_hyp_sym(sym)     (&__nvhe_undefined_symbol)
 -#define per_cpu_ptr_hyp_sym(sym, cpu) (&__nvhe_undefined_symbol)
++
+ #define this_cpu_ptr_hyp_sym(sym)     (is_kernel_in_hyp_mode()        \
+                                          ? this_cpu_ptr(&sym)         \
+                                          : this_cpu_ptr_nvhe_sym(sym))
++
+ #define per_cpu_ptr_hyp_sym(sym, cpu) (is_kernel_in_hyp_mode()        \
+                                          ? per_cpu_ptr(&sym, cpu)     \
+                                          : per_cpu_ptr_nvhe_sym(sym, cpu))
++
 +#define CHOOSE_VHE_SYM(sym)   sym
 +#define CHOOSE_NVHE_SYM(sym)  kvm_nvhe_sym(sym)
 +
  #endif
  
  /* Translate a kernel address @ptr into its equivalent linear mapping */
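A minimal userspace sketch of the per_cpu_ptr_nvhe_sym() arithmetic above, for illustration only (not part of this merge): the symbol's offset inside the original percpu section image is added to the base recorded for that CPU, and a zero base, i.e. percpu memory not yet allocated, yields NULL. The names pcpu_image, pcpu_base and NCPUS are invented stand-ins for the hyp percpu section, kvm_arm_hyp_percpu_base[] and NR_CPUS.

	#include <stdio.h>
	#include <stdlib.h>

	#define NCPUS 4

	/* Stand-in for the hyp percpu section image linked into the kernel. */
	static struct { unsigned long vector; char host_data[64]; } pcpu_image;

	/* Stand-in for kvm_arm_hyp_percpu_base[]: base of each CPU's private copy. */
	static unsigned long pcpu_base[NCPUS];

	/* Same shape as per_cpu_ptr_nvhe_sym(): base + (sym - __per_cpu_start). */
	static void *pcpu_ptr(void *sym, int cpu)
	{
		unsigned long base = pcpu_base[cpu];
		unsigned long off = (unsigned long)sym - (unsigned long)&pcpu_image;

		return base ? (void *)(base + off) : NULL;
	}

	int main(void)
	{
		for (int cpu = 0; cpu < NCPUS; cpu++)
			pcpu_base[cpu] = (unsigned long)calloc(1, sizeof(pcpu_image));

		unsigned long *v = pcpu_ptr(&pcpu_image.vector, 2);
		*v = 0xdead;
		printf("CPU2 copy of 'vector' lives at %p, value %#lx\n", (void *)v, *v);
		return 0;
	}
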
@@@ -135,13 -121,14 +165,16 @@@ struct kvm_vcpu
  struct kvm_s2_mmu;
  
  DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
 +DECLARE_KVM_NVHE_SYM(__kvm_hyp_host_vector);
  DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
  #define __kvm_hyp_init                CHOOSE_NVHE_SYM(__kvm_hyp_init)
 +#define __kvm_hyp_host_vector CHOOSE_NVHE_SYM(__kvm_hyp_host_vector)
  #define __kvm_hyp_vector      CHOOSE_HYP_SYM(__kvm_hyp_vector)
  
- #ifdef CONFIG_KVM_INDIRECT_VECTORS
+ extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
+ DECLARE_KVM_NVHE_SYM(__per_cpu_start);
+ DECLARE_KVM_NVHE_SYM(__per_cpu_end);
  extern atomic_t arm64_el2_vector_last_slot;
  DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
  #define __bp_harden_hyp_vecs  CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
@@@ -260,16 -214,6 +260,16 @@@ extern char __smccc_workaround_1_smc[__
        ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
  .endm
  
-       hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
 +.macro get_loaded_vcpu vcpu, ctxt
-       hyp_adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
++      adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
 +      ldr     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 +.endm
 +
 +.macro set_loaded_vcpu vcpu, ctxt, tmp
++      adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
 +      str     \vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
 +.endm
 +
  /*
   * KVM extable for unexpected exceptions.
   * In the same format _asm_extable, but output to a different section so that
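An illustrative C reading of the get_loaded_vcpu/set_loaded_vcpu macros above (a sketch, not kernel code): the per-CPU hyp context keeps a pointer to the vcpu currently loaded on this CPU, and NULL in that slot is what the panic path later relies on. The struct and field names are simplified stand-ins; in the real context the slot is addressed from assembly via the HOST_CONTEXT_VCPU offset.

	#include <stdio.h>
	#include <stddef.h>

	struct vcpu { int id; };

	/* Simplified stand-in for the per-CPU kvm_hyp_ctxt; only the
	 * running-vcpu slot is modelled here. */
	struct hyp_ctxt { struct vcpu *running_vcpu; };

	static struct hyp_ctxt hyp_ctxt[4];	/* one per CPU */

	static void set_loaded_vcpu(int cpu, struct vcpu *v)	/* the str in the macro */
	{
		hyp_ctxt[cpu].running_vcpu = v;
	}

	static struct vcpu *get_loaded_vcpu(int cpu)		/* the ldr in the macro */
	{
		return hyp_ctxt[cpu].running_vcpu;
	}

	int main(void)
	{
		struct vcpu v = { .id = 7 };

		set_loaded_vcpu(0, &v);
		printf("cpu0 loaded vcpu: %d\n", get_loaded_vcpu(0)->id);
		/* NULL here is what lets the exit path fall back to hyp_panic. */
		printf("cpu1 loaded vcpu: %p\n", (void *)get_loaded_vcpu(1));
		return 0;
	}
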
index d56d67c3787e34e37ddebf3fda6bd97dcb37fe18,1247d1f30cb3af139e0385bec20d40db5b5b27e8..0aecbab6a7fb3bfc941f0776d5b31e32ca913195
@@@ -568,7 -565,7 +568,7 @@@ void kvm_set_sei_esr(struct kvm_vcpu *v
  
  struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
  
- DECLARE_PER_CPU(struct kvm_host_data, kvm_host_data);
 -DECLARE_KVM_HYP_PER_CPU(kvm_host_data_t, kvm_host_data);
++DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
  
  static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
  {
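A userspace sketch of what the unified per-CPU accessor resolves to on either mode (illustration only, not part of this merge): a VHE kernel reads its own per-CPU copy of a hyp symbol, while an nVHE host has to follow the hyp percpu base to EL2's copy. in_hyp_mode, kern_copy and hyp_copy are invented stand-ins for is_kernel_in_hyp_mode(), the kernel's per-CPU variable and the copy reached through kvm_arm_hyp_percpu_base[].

	#include <stdio.h>
	#include <stdbool.h>

	/* Invented stand-ins for the two homes a "hyp" per-CPU symbol can have. */
	static long kern_copy[4];	/* kernel/VHE per-CPU copy of the symbol */
	static long hyp_copy[4];	/* nVHE copy, normally found via the percpu base */

	static bool in_hyp_mode;	/* stands in for is_kernel_in_hyp_mode() */

	/* Mirrors per_cpu_ptr_hyp_sym(): pick the copy that EL2 will actually see. */
	static long *per_cpu_ptr_hyp(int cpu)
	{
		return in_hyp_mode ? &kern_copy[cpu] : &hyp_copy[cpu];
	}

	int main(void)
	{
		in_hyp_mode = false;		/* nVHE host */
		*per_cpu_ptr_hyp(0) = 1;	/* lands in the hyp copy */

		in_hyp_mode = true;		/* VHE */
		*per_cpu_ptr_hyp(0) = 2;	/* lands in the kernel copy */

		printf("hyp_copy[0]=%ld kern_copy[0]=%ld\n", hyp_copy[0], kern_copy[0]);
		return 0;
	}
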
index c490fe8089b3ee1a36ca4d11c3d86431ffa093e8,cff1cebc759048bcf8c902bb6ea8253462e78424..331394306ccee34633d178dbec9c1800612ac910
@@@ -258,78 -477,32 +254,8 @@@ static inline void *kvm_get_hyp_vector(
        return vect;
  }
  
- /*  This is only called on a !VHE system */
- static inline int kvm_map_vectors(void)
- {
-       /*
-        * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
-        * HEL2 = ARM64_HARDEN_EL2_VECTORS
-        *
-        * !HBP + !HEL2 -> use direct vectors
-        *  HBP + !HEL2 -> use hardened vectors in place
-        * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
-        *  HBP +  HEL2 -> use hardened vertors and use exec mapping
-        */
-       if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
-               __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
-               __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
-       }
-       if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-               phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
-               unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
-               /*
-                * Always allocate a spare vector slot, as we don't
-                * know yet which CPUs have a BP hardening slot that
-                * we can reuse.
-                */
-               __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
-               BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
-               return create_hyp_exec_mappings(vect_pa, size,
-                                               &__kvm_bp_vect_base);
-       }
-       return 0;
- }
- #else
- static inline void *kvm_get_hyp_vector(void)
- {
-       return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
- }
- static inline int kvm_map_vectors(void)
- {
-       return 0;
- }
- #endif
- #ifdef CONFIG_ARM64_SSBD
- DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
- static inline int hyp_map_aux_data(void)
- {
-       int cpu, err;
-       for_each_possible_cpu(cpu) {
-               u64 *ptr;
-               ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
-               err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
-               if (err)
-                       return err;
-       }
-       return 0;
- }
- #else
- static inline int hyp_map_aux_data(void)
- {
-       return 0;
- }
- #endif
  #define kvm_phys_to_vttbr(addr)               phys_to_ttbr(addr)
  
 -/*
 - * Get the magic number 'x' for VTTBR:BADDR of this KVM instance.
 - * With v8.2 LVA extensions, 'x' should be a minimum of 6 with
 - * 52bit IPS.
 - */
 -static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels)
 -{
 -      int x = ARM64_VTTBR_X(ipa_shift, levels);
 -
 -      return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x;
 -}
 -
 -static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels)
 -{
 -      unsigned int x = arm64_vttbr_x(ipa_shift, levels);
 -
 -      return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x);
 -}
 -
 -static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
 -{
 -      return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
 -}
 -
  static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
  {
        struct kvm_vmid *vmid = &mmu->vmid;
Simple merge
index e49189012af11c4fddeae8a9f9e5eb622c79145f,f8388da6f3c76e28c6b18ecadb78100e7b2d60ac..f56122eedffc84866395218d6006c47037b061a5
  __asm__(".arch_extension      virt");
  #endif
  
- DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
- DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
- DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
++DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
++
  static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+ unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
  
  /* The VMID used in the VTTBR */
  static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
@@@ -1320,8 -1356,6 +1367,8 @@@ static void cpu_hyp_reinit(void
  
        cpu_hyp_reset();
  
-       __this_cpu_write(kvm_hyp_vector, (unsigned long)kvm_get_hyp_vector());
++      *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)kvm_get_hyp_vector();
 +
        if (is_kernel_in_hyp_mode())
                kvm_timer_init_vhe();
        else
index 607b8a898826601aff841f74c5bfc702e73385d2,d898f0da5802e3abe2131c6dc44afb158cbe8573..4a81eddabcd83f064fc47adc246c7b203c783323
@@@ -10,5 -10,4 +10,4 @@@ subdir-ccflags-y := -I$(incdir)                               
                    -DDISABLE_BRANCH_PROFILING          \
                    $(DISABLE_STACKLEAK_PLUGIN)
  
- obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
- obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o
 -obj-$(CONFIG_KVM) += vhe/ nvhe/ smccc_wa.o
++obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o smccc_wa.o
index afaa8d1f24854cd158d91ae51ef3de94e22a6c6a,76e7eaf4675eb4b7a148ab06c4db62c33dc2db96..b0afad7a99c6e81c0f5f8bfb70cd9acfc67ff68c
   */
  SYM_FUNC_START(__guest_enter)
        // x0: vcpu
 -      // x1: host context
 -      // x2-x17: clobbered by macros
 +      // x1-x17: clobbered by macros
        // x29: guest context
  
-       hyp_adr_this_cpu x1, kvm_hyp_ctxt, x2
 -      // Store the host regs
++      adr_this_cpu x1, kvm_hyp_ctxt, x2
 +
 +      // Store the hyp regs
        save_callee_saved_regs x1
  
 -      // Save the host's sp_el0
 +      // Save hyp's sp_el0
        save_sp_el0     x1, x2
  
 -      // Now the host state is stored if we have a pending RAS SError it must
 -      // affect the host. If any asynchronous exception is pending we defer
 -      // the guest entry. The DSB isn't necessary before v8.2 as any SError
 -      // would be fatal.
 +      // Now the hyp state is stored if we have a pending RAS SError it must
 +      // affect the host or hyp. If any asynchronous exception is pending we
 +      // defer the guest entry. The DSB isn't necessary before v8.2 as any
 +      // SError would be fatal.
  alternative_if ARM64_HAS_RAS_EXTN
        dsb     nshst
        isb
@@@ -79,26 -116,6 +79,26 @@@ alternative_else_nop_endi
        eret
        sb
  
-       hyp_adr_this_cpu x0, kvm_hyp_ctxt, x1
 +SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
 +      // x2-x29,lr: vcpu regs
 +      // vcpu x0-x1 on the stack
 +
 +      // If the hyp context is loaded, go straight to hyp_panic
 +      get_loaded_vcpu x0, x1
 +      cbz     x0, hyp_panic
 +
 +      // The hyp context is saved so make sure it is restored to allow
 +      // hyp_panic to run at hyp and, subsequently, panic to run in the host.
 +      // This makes use of __guest_exit to avoid duplication but sets the
 +      // return address to tail call into hyp_panic. As a side effect, the
 +      // current state is saved to the guest context but it will only be
 +      // accurate if the guest had been completely restored.
++      adr_this_cpu x0, kvm_hyp_ctxt, x1
 +      adr     x1, hyp_panic
 +      str     x1, [x0, #CPU_XREG_OFFSET(30)]
 +
 +      get_vcpu_ptr    x1, x0
 +
  SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
        // x0: return code
        // x1: vcpu
        // Store the guest's sp_el0
        save_sp_el0     x1, x2
  
-       hyp_adr_this_cpu x2, kvm_hyp_ctxt, x3
 -      get_host_ctxt   x2, x3
++      adr_this_cpu x2, kvm_hyp_ctxt, x3
  
 -      // Macro ptrauth_switch_to_guest format:
 -      //      ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3)
 +      // Macro ptrauth_switch_to_hyp format:
 +      //      ptrauth_switch_to_hyp(guest cxt, host cxt, tmp1, tmp2, tmp3)
        // The below macro to save/restore keys is not implemented in C code
        // as it may cause Pointer Authentication key signing mismatch errors
        // when this feature is enabled for kernel code.
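The control-flow trick in __guest_exit_panic above, modelled in plain C as a sketch (names invented, nothing here is the real entry code): the saved return-address slot in the hyp context is overwritten with the address of hyp_panic, so reusing the common __guest_exit restore path ends with a tail call into the panic handler instead of duplicating the restore sequence.

	#include <stdio.h>

	/* Invented stand-in for the saved hyp context; only the return-address
	 * slot (x30/lr in the real context) is modelled. */
	struct saved_ctxt { void (*lr)(void); };

	static struct saved_ctxt hyp_ctxt;

	static void hyp_panic_stub(void)
	{
		printf("hyp_panic reached via the patched return address\n");
	}

	/* Mirrors the tail of the common exit path: restore the context, then
	 * "return", which now lands wherever lr points. */
	static void guest_exit_restore(void)
	{
		hyp_ctxt.lr();
	}

	int main(void)
	{
		/* __guest_exit_panic: overwrite the saved lr with hyp_panic... */
		hyp_ctxt.lr = hyp_panic_stub;
		/* ...then reuse the common exit path instead of duplicating it. */
		guest_exit_restore();
		return 0;
	}
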
Simple merge
index 4536b50ddc06c97439c59d8d208c905cc05ff863,0d656914f4210eec10a59daf616584dcf36159ac..eeac62b685a9e50a2722441dcf7b52e0411e29f7
@@@ -383,7 -386,7 +383,7 @@@ static inline bool __hyp_handle_ptrauth
            !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
                return false;
  
-       ctxt = __hyp_this_cpu_ptr(kvm_hyp_ctxt);
 -      ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
++      ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
        __ptrauth_save_key(ctxt, APIA);
        __ptrauth_save_key(ctxt, APIB);
        __ptrauth_save_key(ctxt, APDA);
@@@ -476,43 -479,10 +476,10 @@@ exit
        return false;
  }
  
- static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
- {
-       if (!cpus_have_final_cap(ARM64_SSBD))
-               return false;
-       return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
- }
- static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
- {
- #ifdef CONFIG_ARM64_SSBD
-       /*
-        * The host runs with the workaround always present. If the
-        * guest wants it disabled, so be it...
-        */
-       if (__needs_ssbd_off(vcpu) &&
-           __hyp_this_cpu_read(arm64_ssbd_callback_required))
-               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
- #endif
- }
- static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
- {
- #ifdef CONFIG_ARM64_SSBD
-       /*
-        * If the guest has disabled the workaround, bring it back on.
-        */
-       if (__needs_ssbd_off(vcpu) &&
-           __hyp_this_cpu_read(arm64_ssbd_callback_required))
-               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
- #endif
- }
  static inline void __kvm_unexpected_el2_exception(void)
  {
 +      extern char __guest_exit_panic[];
        unsigned long addr, fixup;
 -      struct kvm_cpu_context *host_ctxt;
        struct exception_table_entry *entry, *end;
        unsigned long elr_el2 = read_sysreg(elr_el2);
  
Simple merge
index a29f247f35e3a395babdaa77b32374eca180cc4d,4472558cbdd95404f25e10880a975093442b5f83..a457a0306e031fbd3ecf5993f8a14173dfddff8e
  #include <asm/processor.h>
  #include <asm/thread_info.h>
  
 -/* Non-VHE copy of the kernel symbol. */
 -DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 -
 -/* Non-VHE instance of kvm_host_data. */
 -DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
++/* Non-VHE specific context */
++DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
++DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
++DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
  static void __activate_traps(struct kvm_vcpu *vcpu)
  {
        u64 val;
@@@ -42,7 -48,6 +47,7 @@@
        }
  
        write_sysreg(val, cptr_el2);
-       write_sysreg(__hyp_this_cpu_read(kvm_hyp_vector), vbar_el2);
++      write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
  
        if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
@@@ -176,7 -179,9 +181,7 @@@ int __kvm_vcpu_run(struct kvm_vcpu *vcp
                pmr_sync();
        }
  
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 -      vcpu = kern_hyp_va(vcpu);
 -
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        host_ctxt->__hyp_running_vcpu = vcpu;
        guest_ctxt = &vcpu->arch.ctxt;
  
  
        __debug_switch_to_guest(vcpu);
  
-       __set_guest_arch_workaround_state(vcpu);
        do {
                /* Jump in the fire! */
 -              exit_code = __guest_enter(vcpu, host_ctxt);
 +              exit_code = __guest_enter(vcpu);
  
                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));
@@@ -250,17 -249,13 +251,17 @@@ void __noreturn hyp_panic(void
        u64 spsr = read_sysreg_el2(SYS_SPSR);
        u64 elr = read_sysreg_el2(SYS_ELR);
        u64 par = read_sysreg(par_el1);
 -      struct kvm_vcpu *vcpu = host_ctxt->__hyp_running_vcpu;
 -      unsigned long str_va;
 +      bool restore_host = true;
 +      struct kvm_cpu_context *host_ctxt;
 +      struct kvm_vcpu *vcpu;
  
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 -      if (read_sysreg(vttbr_el2)) {
++      host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
 +      vcpu = host_ctxt->__hyp_running_vcpu;
 +
 +      if (vcpu) {
                __timer_disable_traps(vcpu);
                __deactivate_traps(vcpu);
 -              __deactivate_vm(vcpu);
 +              __load_host_stage2();
                __sysreg_restore_state_nvhe(host_ctxt);
        }
  
index cf477f856e51bf15814a6e8fea285128675e9fcc,a8d40753279855c160bbf6a56342e3859f3e3257..fe69de16dadc698030954c5d5a60b237b1b201a8
  
  const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
  
 -/* VHE instance of kvm_host_data. */
 -DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
++/* VHE specific context */
++DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
++DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
++DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
  static void __activate_traps(struct kvm_vcpu *vcpu)
  {
        u64 val;
@@@ -131,11 -134,9 +136,9 @@@ static int __kvm_vcpu_run_vhe(struct kv
        sysreg_restore_guest_state_vhe(guest_ctxt);
        __debug_switch_to_guest(vcpu);
  
-       __set_guest_arch_workaround_state(vcpu);
        do {
                /* Jump in the fire! */
 -              exit_code = __guest_enter(vcpu, host_ctxt);
 +              exit_code = __guest_enter(vcpu);
  
                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));
@@@ -192,12 -191,10 +193,12 @@@ int __kvm_vcpu_run(struct kvm_vcpu *vcp
        return ret;
  }
  
 -static void __hyp_call_panic(u64 spsr, u64 elr, u64 par,
 -                           struct kvm_cpu_context *host_ctxt)
 +static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
  {
 +      struct kvm_cpu_context *host_ctxt;
        struct kvm_vcpu *vcpu;
-       host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 +
++      host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        vcpu = host_ctxt->__hyp_running_vcpu;
  
        __deactivate_traps(vcpu);
Simple merge
Simple merge