Merge remote-tracking branch 'arm64/for-next/ghostbusters' into kvm-arm64/hyp-pcpu
author     Marc Zyngier <maz@kernel.org>    Wed, 30 Sep 2020 08:48:30 +0000 (09:48 +0100)
committer  Marc Zyngier <maz@kernel.org>    Wed, 30 Sep 2020 08:48:30 +0000 (09:48 +0100)
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kernel/image-vars.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c

diff --combined arch/arm64/include/asm/kvm_asm.h
index 863f669d4dc8694e28b6c60194806add4d7bb721,7f7072f6cb45f649dd639867adf9980131556751..3438e85e1df6df80f7ad9c7a14ea14e7680063cb
@@@ -7,12 -7,8 +7,9 @@@
  #ifndef __ARM_KVM_ASM_H__
  #define __ARM_KVM_ASM_H__
  
 +#include <asm/hyp_image.h>
  #include <asm/virt.h>
  
- #define       VCPU_WORKAROUND_2_FLAG_SHIFT    0
- #define       VCPU_WORKAROUND_2_FLAG          (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
  #define ARM_EXIT_WITH_SERROR_BIT  31
  #define ARM_EXCEPTION_CODE(x)   ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
  #define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
  
  #include <linux/mm.h>
  
 -/*
 - * Translate name of a symbol defined in nVHE hyp to the name seen
 - * by kernel proper. All nVHE symbols are prefixed by the build system
 - * to avoid clashes with the VHE variants.
 - */
 -#define kvm_nvhe_sym(sym)     __kvm_nvhe_##sym
 -
  #define DECLARE_KVM_VHE_SYM(sym)      extern char sym[]
  #define DECLARE_KVM_NVHE_SYM(sym)     extern char kvm_nvhe_sym(sym)[]
  
        DECLARE_KVM_VHE_SYM(sym);               \
        DECLARE_KVM_NVHE_SYM(sym)
  
 +#define DECLARE_KVM_VHE_PER_CPU(type, sym)    \
 +      DECLARE_PER_CPU(type, sym)
 +#define DECLARE_KVM_NVHE_PER_CPU(type, sym)   \
 +      DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))
 +
 +#define DECLARE_KVM_HYP_PER_CPU(type, sym)    \
 +      DECLARE_KVM_VHE_PER_CPU(type, sym);     \
 +      DECLARE_KVM_NVHE_PER_CPU(type, sym)
 +
  #define CHOOSE_VHE_SYM(sym)   sym
  #define CHOOSE_NVHE_SYM(sym)  kvm_nvhe_sym(sym)
  
 +/*
 + * Compute pointer to a symbol defined in nVHE percpu region.
 + * Returns NULL if percpu memory has not been allocated yet.
 + */
 +#define this_cpu_ptr_nvhe_sym(sym)    per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
 +#define per_cpu_ptr_nvhe_sym(sym, cpu)                                                \
 +      ({                                                                      \
 +              unsigned long base, off;                                        \
 +              base = kvm_arm_hyp_percpu_base[cpu];                            \
 +              off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -                    \
 +                    (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);         \
 +              base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;      \
 +      })
 +
  #ifndef __KVM_NVHE_HYPERVISOR__
  /*
   * BIG FAT WARNINGS:
   * - Don't let the nVHE hypervisor have access to this, as it will
   *   pick the *wrong* symbol (yes, it runs at EL2...).
   */
 -#define CHOOSE_HYP_SYM(sym)   (is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
 +#define CHOOSE_HYP_SYM(sym)           (is_kernel_in_hyp_mode()        \
 +                                         ? CHOOSE_VHE_SYM(sym)        \
                                           : CHOOSE_NVHE_SYM(sym))
 +#define this_cpu_ptr_hyp_sym(sym)     (is_kernel_in_hyp_mode()        \
 +                                         ? this_cpu_ptr(&sym)         \
 +                                         : this_cpu_ptr_nvhe_sym(sym))
 +#define per_cpu_ptr_hyp_sym(sym, cpu) (is_kernel_in_hyp_mode()        \
 +                                         ? per_cpu_ptr(&sym, cpu)     \
 +                                         : per_cpu_ptr_nvhe_sym(sym, cpu))
  #else
  /* The nVHE hypervisor shouldn't even try to access anything */
  extern void *__nvhe_undefined_symbol;
 -#define CHOOSE_HYP_SYM(sym)   __nvhe_undefined_symbol
 +#define CHOOSE_HYP_SYM(sym)           __nvhe_undefined_symbol
 +#define this_cpu_ptr_hyp_sym(sym)     (&__nvhe_undefined_symbol)
 +#define per_cpu_ptr_hyp_sym(sym, cpu) (&__nvhe_undefined_symbol)
  #endif
  
  /* Translate a kernel address @ptr into its equivalent linear mapping */
@@@ -128,15 -99,9 +125,13 @@@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector)
  #define __kvm_hyp_init                CHOOSE_NVHE_SYM(__kvm_hyp_init)
  #define __kvm_hyp_vector      CHOOSE_HYP_SYM(__kvm_hyp_vector)
  
- #ifdef CONFIG_KVM_INDIRECT_VECTORS
 +extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 +DECLARE_KVM_NVHE_SYM(__per_cpu_start);
 +DECLARE_KVM_NVHE_SYM(__per_cpu_end);
 +
  extern atomic_t arm64_el2_vector_last_slot;
  DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
  #define __bp_harden_hyp_vecs  CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
- #endif
  
  extern void __kvm_flush_vm_context(void);
  extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
@@@ -179,6 -144,26 +174,6 @@@ extern char __smccc_workaround_1_smc[__
                addr;                                                   \
        })
  
 -/*
 - * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
 - * provided that sym is really a *symbol* and not a pointer obtained from
 - * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
 - * sparse quiet.
 - */
 -#define __hyp_this_cpu_ptr(sym)                                               \
 -      ({                                                              \
 -              void *__ptr;                                            \
 -              __verify_pcpu_ptr(&sym);                                \
 -              __ptr = hyp_symbol_addr(sym);                           \
 -              __ptr += read_sysreg(tpidr_el2);                        \
 -              (typeof(sym) __kernel __force *)__ptr;                  \
 -       })
 -
 -#define __hyp_this_cpu_read(sym)                                      \
 -      ({                                                              \
 -              *__hyp_this_cpu_ptr(sym);                               \
 -       })
 -
  #define __KVM_EXTABLE(from, to)                                               \
        "       .pushsection    __kvm_ex_table, \"a\"\n"                \
        "       .align          3\n"                                    \
  
  #else /* __ASSEMBLY__ */
  
 -.macro hyp_adr_this_cpu reg, sym, tmp
 -      adr_l   \reg, \sym
 -      mrs     \tmp, tpidr_el2
 -      add     \reg, \reg, \tmp
 -.endm
 -
 -.macro hyp_ldr_this_cpu reg, sym, tmp
 -      adr_l   \reg, \sym
 -      mrs     \tmp, tpidr_el2
 -      ldr     \reg,  [\reg, \tmp]
 -.endm
 -
  .macro get_host_ctxt reg, tmp
 -      hyp_adr_this_cpu \reg, kvm_host_data, \tmp
 +      adr_this_cpu \reg, kvm_host_data, \tmp
        add     \reg, \reg, #HOST_DATA_CONTEXT
  .endm
  
diff --combined arch/arm64/include/asm/kvm_host.h
index 964e05777fe3f11e2b67ff2fd2893f066dab1eab,8785ebf9bc292154826a18f3df276a0f094f4478..1247d1f30cb3af139e0385bec20d40db5b5b27e8
@@@ -565,7 -565,7 +565,7 @@@ void kvm_set_sei_esr(struct kvm_vcpu *v
  
  struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
  
 -DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 +DECLARE_KVM_HYP_PER_CPU(kvm_host_data_t, kvm_host_data);
  
  static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
  {
@@@ -631,46 -631,6 +631,6 @@@ static inline void kvm_set_pmu_events(u
  static inline void kvm_clr_pmu_events(u32 clr) {}
  #endif
  
- #define KVM_BP_HARDEN_UNKNOWN         -1
- #define KVM_BP_HARDEN_WA_NEEDED               0
- #define KVM_BP_HARDEN_NOT_REQUIRED    1
- static inline int kvm_arm_harden_branch_predictor(void)
- {
-       switch (get_spectre_v2_workaround_state()) {
-       case ARM64_BP_HARDEN_WA_NEEDED:
-               return KVM_BP_HARDEN_WA_NEEDED;
-       case ARM64_BP_HARDEN_NOT_REQUIRED:
-               return KVM_BP_HARDEN_NOT_REQUIRED;
-       case ARM64_BP_HARDEN_UNKNOWN:
-       default:
-               return KVM_BP_HARDEN_UNKNOWN;
-       }
- }
- #define KVM_SSBD_UNKNOWN              -1
- #define KVM_SSBD_FORCE_DISABLE                0
- #define KVM_SSBD_KERNEL               1
- #define KVM_SSBD_FORCE_ENABLE         2
- #define KVM_SSBD_MITIGATED            3
- static inline int kvm_arm_have_ssbd(void)
- {
-       switch (arm64_get_ssbd_state()) {
-       case ARM64_SSBD_FORCE_DISABLE:
-               return KVM_SSBD_FORCE_DISABLE;
-       case ARM64_SSBD_KERNEL:
-               return KVM_SSBD_KERNEL;
-       case ARM64_SSBD_FORCE_ENABLE:
-               return KVM_SSBD_FORCE_ENABLE;
-       case ARM64_SSBD_MITIGATED:
-               return KVM_SSBD_MITIGATED;
-       case ARM64_SSBD_UNKNOWN:
-       default:
-               return KVM_SSBD_UNKNOWN;
-       }
- }
  void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
  void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);
  
diff --combined arch/arm64/kernel/image-vars.h
index 80da861b8180cc34d577fcba2d9fe2dfd6c6500a,d0f3f35dd0d708779adf16178c64af618a65dfe0..fbd4b6b1fde5d3ffc90b425b14aa8da214fc5139
@@@ -61,12 -61,14 +61,11 @@@ __efistub__ctype           = _ctype
   * memory mappings.
   */
  
 -#define KVM_NVHE_ALIAS(sym) __kvm_nvhe_##sym = sym;
 -
  /* Alternative callbacks for init-time patching of nVHE hyp code. */
- KVM_NVHE_ALIAS(arm64_enable_wa2_handling);
  KVM_NVHE_ALIAS(kvm_patch_vector_branch);
  KVM_NVHE_ALIAS(kvm_update_va_mask);
  
  /* Global kernel state accessed by nVHE hyp code. */
 -KVM_NVHE_ALIAS(kvm_host_data);
  KVM_NVHE_ALIAS(kvm_vgic_global_state);
  
  /* Kernel constant needed to compute idmap addresses. */
diff --combined arch/arm64/kvm/arm.c
index 92c88deea3575e83bbeaac869e20b828415b14fd,13e559ac7235b175035625a4af8190be6990ac9a..f8388da6f3c76e28c6b18ecadb78100e7b2d60ac
@@@ -46,8 -46,8 +46,8 @@@
  __asm__(".arch_extension      virt");
  #endif
  
 -DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
  static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 +unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
  
  /* The VMID used in the VTTBR */
  static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
@@@ -1256,19 -1256,40 +1256,53 @@@ long kvm_arch_vm_ioctl(struct file *fil
        }
  }
  
 +static unsigned long nvhe_percpu_size(void)
 +{
 +      return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) -
 +              (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start);
 +}
 +
 +static unsigned long nvhe_percpu_order(void)
 +{
 +      unsigned long size = nvhe_percpu_size();
 +
 +      return size ? get_order(size) : 0;
 +}
 +
+ static int kvm_map_vectors(void)
+ {
+       /*
+        * SV2  = ARM64_SPECTRE_V2
+        * HEL2 = ARM64_HARDEN_EL2_VECTORS
+        *
+        * !SV2 + !HEL2 -> use direct vectors
+        *  SV2 + !HEL2 -> use hardened vectors in place
+        * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
+        *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
+        */
+       if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+               __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+               __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+       }
+       if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+               phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+               unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+               /*
+                * Always allocate a spare vector slot, as we don't
+                * know yet which CPUs have a BP hardening slot that
+                * we can reuse.
+                */
+               __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+               BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+               return create_hyp_exec_mappings(vect_pa, size,
+                                               &__kvm_bp_vect_base);
+       }
+       return 0;
+ }
  static void cpu_init_hyp_mode(void)
  {
        phys_addr_t pgd_ptr;
         * kernel's mapping to the linear mapping, and store it in tpidr_el2
         * so that we can use adr_l to access per-cpu variables in EL2.
         */
 -      tpidr_el2 = ((unsigned long)this_cpu_ptr(&kvm_host_data) -
 -                   (unsigned long)kvm_ksym_ref(&kvm_host_data));
 +      tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
 +                  (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
  
        pgd_ptr = kvm_mmu_get_httbr();
        hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
         * at EL2.
         */
        if (this_cpu_has_cap(ARM64_SSBS) &&
-           arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+           arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
                kvm_call_hyp_nvhe(__kvm_enable_ssbs);
        }
-       /* Copy information whether SSBD callback is required to hyp. */
-       hyp_init_aux_data();
  }
  
  static void cpu_hyp_reset(void)
  
  static void cpu_hyp_reinit(void)
  {
 -      kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);
 +      kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
  
        cpu_hyp_reset();
  
@@@ -1475,10 -1493,8 +1506,10 @@@ static void teardown_hyp_mode(void
        int cpu;
  
        free_hyp_pgds();
 -      for_each_possible_cpu(cpu)
 +      for_each_possible_cpu(cpu) {
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
 +              free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
 +      }
  }
  
  /**
@@@ -1511,24 -1527,6 +1542,24 @@@ static int init_hyp_mode(void
                per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
        }
  
 +      /*
 +       * Allocate and initialize pages for Hypervisor-mode percpu regions.
 +       */
 +      for_each_possible_cpu(cpu) {
 +              struct page *page;
 +              void *page_addr;
 +
 +              page = alloc_pages(GFP_KERNEL, nvhe_percpu_order());
 +              if (!page) {
 +                      err = -ENOMEM;
 +                      goto out_err;
 +              }
 +
 +              page_addr = page_address(page);
 +              memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
 +              kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
 +      }
 +
        /*
         * Map the Hyp-code called directly from the host
         */
                }
        }
  
 +      /*
 +       * Map Hyp percpu pages
 +       */
        for_each_possible_cpu(cpu) {
 -              kvm_host_data_t *cpu_data;
 +              char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
 +              char *percpu_end = percpu_begin + nvhe_percpu_size();
  
 -              cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
 -              err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
 +              err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
  
                if (err) {
 -                      kvm_err("Cannot map host CPU state: %d\n", err);
 +                      kvm_err("Cannot map hyp percpu region\n");
                        goto out_err;
                }
        }
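
The arm.c changes above follow one pattern: allocate a private copy of the nVHE per-CPU template for each CPU, seed it from the image section, and program TPIDR_EL2 with the delta between that copy and the image symbols so EL2 code can reach its own data via adr_l plus TPIDR_EL2. A rough userspace model of that offset calculation (names are invented for the sketch; the real code uses the section exported by the hyp linker script):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-ins for the image's __per_cpu_start section and a symbol inside it. */
static char image_percpu_start[128];
static char *image_sym = &image_percpu_start[32];

int main(void)
{
	/* "Allocate and initialize pages for Hypervisor-mode percpu regions." */
	char *cpu_copy = malloc(sizeof(image_percpu_start));
	memcpy(cpu_copy, image_percpu_start, sizeof(image_percpu_start));

	/* The value cpu_init_hyp_mode() would load into TPIDR_EL2 for this CPU. */
	intptr_t tpidr_el2 = (intptr_t)cpu_copy - (intptr_t)image_percpu_start;

	/* At EL2, "address of symbol + TPIDR_EL2" lands inside this CPU's copy. */
	char *el2_view = (char *)((intptr_t)image_sym + tpidr_el2);
	printf("per-CPU view: %p (copy base %p)\n", (void *)el2_view, (void *)cpu_copy);

	free(cpu_copy);
	return 0;
}
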
diff --combined arch/arm64/kvm/hyp/include/hyp/switch.h
index f150407fa7981003729010cd3812c48a8f315c24,a6840823b60ef0e92a0ec5970c71fd5e30f65d1d..0d656914f4210eec10a59daf616584dcf36159ac
@@@ -386,7 -386,7 +386,7 @@@ static inline bool __hyp_handle_ptrauth
            !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
                return false;
  
 -      ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 +      ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        __ptrauth_save_key(ctxt, APIA);
        __ptrauth_save_key(ctxt, APIB);
        __ptrauth_save_key(ctxt, APDA);
@@@ -479,39 -479,6 +479,6 @@@ exit
        return false;
  }
  
- static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
- {
-       if (!cpus_have_final_cap(ARM64_SSBD))
-               return false;
-       return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
- }
- static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
- {
- #ifdef CONFIG_ARM64_SSBD
-       /*
-        * The host runs with the workaround always present. If the
-        * guest wants it disabled, so be it...
-        */
-       if (__needs_ssbd_off(vcpu) &&
-           __this_cpu_read(arm64_ssbd_callback_required))
-               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
- #endif
- }
- static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
- {
- #ifdef CONFIG_ARM64_SSBD
-       /*
-        * If the guest has disabled the workaround, bring it back on.
-        */
-       if (__needs_ssbd_off(vcpu) &&
-           __this_cpu_read(arm64_ssbd_callback_required))
-               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
- #endif
- }
  static inline void __kvm_unexpected_el2_exception(void)
  {
        unsigned long addr, fixup;
  
        entry = hyp_symbol_addr(__start___kvm_ex_table);
        end = hyp_symbol_addr(__stop___kvm_ex_table);
 -      host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 +      host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
  
        while (entry < end) {
                addr = (unsigned long)&entry->insn + entry->insn;
diff --combined arch/arm64/kvm/hyp/nvhe/switch.c
index a7e9b03bd9d1bbb174d0428793adb94fa29f7555,8d3dd4f479244c3080da9a32b70fb6b3ce216ec8..4472558cbdd95404f25e10880a975093442b5f83
  #include <asm/processor.h>
  #include <asm/thread_info.h>
  
 +/* Non-VHE copy of the kernel symbol. */
 +DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 +
 +/* Non-VHE instance of kvm_host_data. */
 +DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
 +
  static void __activate_traps(struct kvm_vcpu *vcpu)
  {
        u64 val;
@@@ -181,7 -175,7 +181,7 @@@ int __kvm_vcpu_run(struct kvm_vcpu *vcp
  
        vcpu = kern_hyp_va(vcpu);
  
 -      host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 +      host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        host_ctxt->__hyp_running_vcpu = vcpu;
        guest_ctxt = &vcpu->arch.ctxt;
  
  
        __debug_switch_to_guest(vcpu);
  
-       __set_guest_arch_workaround_state(vcpu);
        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu, host_ctxt);
                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));
  
-       __set_host_arch_workaround_state(vcpu);
        __sysreg_save_state_nvhe(guest_ctxt);
        __sysreg32_save_state(vcpu);
        __timer_disable_traps(vcpu);
diff --combined arch/arm64/kvm/hyp/vhe/switch.c
index 0949fc97bf03fb2c23930b4f9969abe5667068d1,ecf67e678203f8cd70a7b136918a2e45ea9e995b..a8d40753279855c160bbf6a56342e3859f3e3257
@@@ -28,9 -28,6 +28,9 @@@
  
  const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
  
 +/* VHE instance of kvm_host_data. */
 +DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
 +
  static void __activate_traps(struct kvm_vcpu *vcpu)
  {
        u64 val;
@@@ -111,7 -108,7 +111,7 @@@ static int __kvm_vcpu_run_vhe(struct kv
        struct kvm_cpu_context *guest_ctxt;
        u64 exit_code;
  
 -      host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 +      host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
        host_ctxt->__hyp_running_vcpu = vcpu;
        guest_ctxt = &vcpu->arch.ctxt;
  
        sysreg_restore_guest_state_vhe(guest_ctxt);
        __debug_switch_to_guest(vcpu);
  
-       __set_guest_arch_workaround_state(vcpu);
        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu, host_ctxt);
                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));
  
-       __set_host_arch_workaround_state(vcpu);
        sysreg_save_guest_state_vhe(guest_ctxt);
  
        __deactivate_traps(vcpu);