arch/arm64/kvm/hyp/switch.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
                vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
                                      KVM_ARM64_FP_HOST);

        return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

/* Save the 32-bit only FPSIMD system register state */
static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
        if (!vcpu_el1_is_32bit(vcpu))
                return;

        vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
}

static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
        /*
         * We are about to set CPTR_EL2.TFP to trap all floating point
         * register accesses to EL2, however, the ARM ARM clearly states that
         * traps are only taken to EL2 if the operation would not otherwise
         * trap to EL1.  Therefore, always make sure that for 32-bit guests,
         * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
         * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
         * it will cause an exception.
         */
        if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
                write_sysreg(1 << 30, fpexc32_el2);
                isb();
        }
}

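/* Set up the trap and PMU/debug register state common to the VHE and non-VHE paths */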
static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
{
        /* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
        write_sysreg(1 << 15, hstr_el2);

        /*
         * Make sure we trap PMU access from EL0 to EL2. Also sanitize
         * PMSELR_EL0 to make sure it never contains the cycle
         * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
         * EL1 instead of being trapped to EL2.
         */
        write_sysreg(0, pmselr_el0);
        write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
        write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

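/* Undo the register state set up by __activate_traps_common() */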
static void __hyp_text __deactivate_traps_common(void)
{
        write_sysreg(0, hstr_el2);
        write_sysreg(0, pmuserenr_el0);
}

static void activate_traps_vhe(struct kvm_vcpu *vcpu)
{
        u64 val;

        val = read_sysreg(cpacr_el1);
        val |= CPACR_EL1_TTA;
        val &= ~CPACR_EL1_ZEN;
        if (update_fp_enabled(vcpu)) {
                if (vcpu_has_sve(vcpu))
                        val |= CPACR_EL1_ZEN;
        } else {
                val &= ~CPACR_EL1_FPEN;
                __activate_traps_fpsimd32(vcpu);
        }

        write_sysreg(val, cpacr_el1);

        write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
NOKPROBE_SYMBOL(activate_traps_vhe);

static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
        u64 val;

        __activate_traps_common(vcpu);

        val = CPTR_EL2_DEFAULT;
        val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
        if (!update_fp_enabled(vcpu)) {
                val |= CPTR_EL2_TFP;
                __activate_traps_fpsimd32(vcpu);
        }

        write_sysreg(val, cptr_el2);
}

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
        u64 hcr = vcpu->arch.hcr_el2;

        write_sysreg(hcr, hcr_el2);

        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
                write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

        if (has_vhe())
                activate_traps_vhe(vcpu);
        else
                __activate_traps_nvhe(vcpu);
}

static void deactivate_traps_vhe(void)
{
        extern char vectors[];  /* kernel exception vectors */
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);

        /*
         * ARM erratum 1165522 requires the actual execution of the above
         * before we can switch to the EL2/EL0 translation regime used by
         * the host.
         */
        asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));

        write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
        write_sysreg(vectors, vbar_el1);
}
NOKPROBE_SYMBOL(deactivate_traps_vhe);

static void __hyp_text __deactivate_traps_nvhe(void)
{
        u64 mdcr_el2 = read_sysreg(mdcr_el2);

        __deactivate_traps_common();

        mdcr_el2 &= MDCR_EL2_HPMN_MASK;
        mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

        write_sysreg(mdcr_el2, mdcr_el2);
        write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
        write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
        /*
         * If we pended a virtual abort, preserve it until it gets
         * cleared. See D1.14.3 (Virtual Interrupts) for details, but
         * the crucial bit is "On taking a vSError interrupt,
         * HCR_EL2.VSE is cleared to 0."
         */
        if (vcpu->arch.hcr_el2 & HCR_VSE)
                vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

        if (has_vhe())
                deactivate_traps_vhe();
        else
                __deactivate_traps_nvhe();
}

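/*
 * On VHE, the state programmed by __activate_traps_common() is switched at
 * vcpu load/put time rather than on every guest entry/exit.
 */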
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
        __activate_traps_common(vcpu);
}

void deactivate_traps_vhe_put(void)
{
        u64 mdcr_el2 = read_sysreg(mdcr_el2);

        mdcr_el2 &= MDCR_EL2_HPMN_MASK |
                    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
                    MDCR_EL2_TPMS;

        write_sysreg(mdcr_el2, mdcr_el2);

        __deactivate_traps_common();
}

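/* Install the guest's stage 2 translation */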
static void __hyp_text __activate_vm(struct kvm *kvm)
{
        __load_guest_stage2(kvm);
}

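/* Drop the guest's stage 2 configuration by clearing VTTBR_EL2 */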
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
        write_sysreg(0, vttbr_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
                __vgic_v3_save_state(vcpu);
                __vgic_v3_deactivate_traps(vcpu);
        }
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
                __vgic_v3_activate_traps(vcpu);
                __vgic_v3_restore_state(vcpu);
        }
}

static bool __hyp_text __true_value(void)
{
        return true;
}

static bool __hyp_text __false_value(void)
{
        return false;
}

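/*
 * Patched at boot: __check_arm_834220()() reads as true only on CPUs
 * affected by erratum 834220.
 */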
static hyp_alternate_select(__check_arm_834220,
                            __false_value, __true_value,
                            ARM64_WORKAROUND_834220);

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
        u64 par, tmp;

        /*
         * Resolve the IPA the hard way using the guest VA.
         *
         * Stage-1 translation already validated the memory access
         * rights. As such, we can use the EL1 translation regime, and
         * don't have to distinguish between EL0 and EL1 access.
         *
         * We do need to save/restore PAR_EL1 though, as we haven't
         * saved the guest context yet, and we may return early...
         */
        par = read_sysreg(par_el1);
        asm volatile("at s1e1r, %0" : : "r" (far));
        isb();

        tmp = read_sysreg(par_el1);
        write_sysreg(par, par_el1);

        if (unlikely(tmp & SYS_PAR_EL1_F))
                return false; /* Translation failed, back to guest */

        /* Convert PAR to HPFAR format */
        *hpfar = PAR_TO_HPFAR(tmp);
        return true;
}

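/*
 * Record FAR_EL2/HPFAR_EL2 for a guest abort, using an AT instruction to
 * resolve the IPA when HPFAR_EL2 may be invalid.
 */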
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
        u8 ec;
        u64 esr;
        u64 hpfar, far;

        esr = vcpu->arch.fault.esr_el2;
        ec = ESR_ELx_EC(esr);

        if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
                return true;

        far = read_sysreg_el2(SYS_FAR);

        /*
         * The HPFAR can be invalid if the stage 2 fault did not
         * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
         * bit is clear) and one of the two following cases is true:
         *   1. The fault was due to a permission fault
         *   2. The processor carries erratum 834220
         *
         * Therefore, for all non-S1PTW faults where we either have a
         * permission fault or the erratum workaround is enabled, we
         * resolve the IPA using the AT instruction.
         */
        if (!(esr & ESR_ELx_S1PTW) &&
            (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
                if (!__translate_far_to_hpfar(far, &hpfar))
                        return false;
        } else {
                hpfar = read_sysreg(hpfar_el2);
        }

        vcpu->arch.fault.far_el2 = far;
        vcpu->arch.fault.hpfar_el2 = hpfar;
        return true;
}

/* Check for an FPSIMD/SVE trap and handle as appropriate */
static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
        bool vhe, sve_guest, sve_host;
        u8 hsr_ec;

        if (!system_supports_fpsimd())
                return false;

        if (system_supports_sve()) {
                sve_guest = vcpu_has_sve(vcpu);
                sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
                vhe = true;
        } else {
                sve_guest = false;
                sve_host = false;
                vhe = has_vhe();
        }

        hsr_ec = kvm_vcpu_trap_get_class(vcpu);
        if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
            hsr_ec != ESR_ELx_EC_SVE)
                return false;

        /* Don't handle SVE traps for non-SVE vcpus here: */
        if (!sve_guest)
                if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
                        return false;

        /* Valid trap.  Switch the context: */

        if (vhe) {
                u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;

                if (sve_guest)
                        reg |= CPACR_EL1_ZEN;

                write_sysreg(reg, cpacr_el1);
        } else {
                write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
                             cptr_el2);
        }

        isb();

        if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
                /*
                 * In the SVE case, VHE is assumed: it is enforced by
                 * Kconfig and kvm_arch_init().
                 */
                if (sve_host) {
                        struct thread_struct *thread = container_of(
                                vcpu->arch.host_fpsimd_state,
                                struct thread_struct, uw.fpsimd_state);

                        sve_save_state(sve_pffr(thread),
                                       &vcpu->arch.host_fpsimd_state->fpsr);
                } else {
                        __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
                }

                vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
        }

        if (sve_guest) {
                sve_load_state(vcpu_sve_pffr(vcpu),
                               &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
                               sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
                write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
        } else {
                __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
        }

        /* Skip restoring fpexc32 for AArch64 guests */
        if (!(read_sysreg(hcr_el2) & HCR_RW))
                write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
                             fpexc32_el2);

        vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

        return true;
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
        if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
                vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

        /*
         * We're using the raw exception code in order to only process
         * the trap if no SError is pending. We will come back to the
         * same PC once the SError has been injected, and replay the
         * trapping instruction.
         */
        if (*exit_code != ARM_EXCEPTION_TRAP)
                goto exit;

        /*
         * We trap the first access to the FP/SIMD to save the host context
         * and restore the guest context lazily.
         * If FP/SIMD is not implemented, handle the trap and inject an
         * undefined instruction exception to the guest.
         * Similarly for trapped SVE accesses.
         */
        if (__hyp_handle_fpsimd(vcpu))
                return true;

        if (!__populate_fault_info(vcpu))
                return true;

        if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
                bool valid;

                valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
                        kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
                        kvm_vcpu_dabt_isvalid(vcpu) &&
                        !kvm_vcpu_dabt_isextabt(vcpu) &&
                        !kvm_vcpu_dabt_iss1tw(vcpu);

                if (valid) {
                        int ret = __vgic_v2_perform_cpuif_access(vcpu);

                        if (ret == 1)
                                return true;

                        /* Promote an illegal access to an SError. */
                        if (ret == -1)
                                *exit_code = ARM_EXCEPTION_EL1_SERROR;

                        goto exit;
                }
        }

        if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
            (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
             kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
                int ret = __vgic_v3_perform_cpuif_access(vcpu);

                if (ret == 1)
                        return true;
        }

exit:
        /* Return to the host kernel and handle the exit */
        return false;
}

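/* True when the guest has asked for the SSBD workaround to be disabled */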
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
        if (!cpus_have_const_cap(ARM64_SSBD))
                return false;

        return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
        /*
         * The host runs with the workaround always present. If the
         * guest wants it disabled, so be it...
         */
        if (__needs_ssbd_off(vcpu) &&
            __hyp_this_cpu_read(arm64_ssbd_callback_required))
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
        /*
         * If the guest has disabled the workaround, bring it back on.
         */
        if (__needs_ssbd_off(vcpu) &&
            __hyp_this_cpu_read(arm64_ssbd_callback_required))
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}

/**
 * Disable host events, enable guest events
 */
static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
        struct kvm_host_data *host;
        struct kvm_pmu_events *pmu;

        host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
        pmu = &host->pmu_events;

        if (pmu->events_host)
                write_sysreg(pmu->events_host, pmcntenclr_el0);

        if (pmu->events_guest)
                write_sysreg(pmu->events_guest, pmcntenset_el0);

        return (pmu->events_host || pmu->events_guest);
}

/**
 * Disable guest events, enable host events
 */
static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
        struct kvm_host_data *host;
        struct kvm_pmu_events *pmu;

        host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
        pmu = &host->pmu_events;

        if (pmu->events_guest)
                write_sysreg(pmu->events_guest, pmcntenclr_el0);

        if (pmu->events_host)
                write_sysreg(pmu->events_host, pmcntenset_el0);
}

/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt;
        struct kvm_cpu_context *guest_ctxt;
        u64 exit_code;

        host_ctxt = vcpu->arch.host_cpu_context;
        host_ctxt->__hyp_running_vcpu = vcpu;
        guest_ctxt = &vcpu->arch.ctxt;

        sysreg_save_host_state_vhe(host_ctxt);

        /*
         * ARM erratum 1165522 requires us to configure both stage 1 and
         * stage 2 translation for the guest context before we clear
         * HCR_EL2.TGE.
         *
         * We have already configured the guest's stage 1 translation in
         * kvm_vcpu_load_sysregs above.  We must now call __activate_vm
         * before __activate_traps, because __activate_vm configures
         * stage 2 translation, and __activate_traps clears HCR_EL2.TGE
         * (among other things).
         */
        __activate_vm(vcpu->kvm);
        __activate_traps(vcpu);

        sysreg_restore_guest_state_vhe(guest_ctxt);
        __debug_switch_to_guest(vcpu);

        __set_guest_arch_workaround_state(vcpu);

        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu, host_ctxt);

                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));

        __set_host_arch_workaround_state(vcpu);

        sysreg_save_guest_state_vhe(guest_ctxt);

        __deactivate_traps(vcpu);

        sysreg_restore_host_state_vhe(host_ctxt);

        if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
                __fpsimd_save_fpexc32(vcpu);

        __debug_switch_to_host(vcpu);

        return exit_code;
}
NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);

/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt;
        struct kvm_cpu_context *guest_ctxt;
        bool pmu_switch_needed;
        u64 exit_code;

        /*
         * Having IRQs masked via PMR when entering the guest means the GIC
         * will not signal the CPU of interrupts of lower priority, and the
         * only way to get out will be via guest exceptions.
         * Naturally, we want to avoid this.
         */
        if (system_uses_irq_prio_masking()) {
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
                dsb(sy);
        }

        vcpu = kern_hyp_va(vcpu);

        host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
        host_ctxt->__hyp_running_vcpu = vcpu;
        guest_ctxt = &vcpu->arch.ctxt;

        pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

        __sysreg_save_state_nvhe(host_ctxt);

        __activate_vm(kern_hyp_va(vcpu->kvm));
        __activate_traps(vcpu);

        __hyp_vgic_restore_state(vcpu);
        __timer_enable_traps(vcpu);

        /*
         * We must restore the 32-bit state before the sysregs, thanks
         * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
         */
        __sysreg32_restore_state(vcpu);
        __sysreg_restore_state_nvhe(guest_ctxt);
        __debug_switch_to_guest(vcpu);

        __set_guest_arch_workaround_state(vcpu);

        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu, host_ctxt);

                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));

        __set_host_arch_workaround_state(vcpu);

        __sysreg_save_state_nvhe(guest_ctxt);
        __sysreg32_save_state(vcpu);
        __timer_disable_traps(vcpu);
        __hyp_vgic_save_state(vcpu);

        __deactivate_traps(vcpu);
        __deactivate_vm(vcpu);

        __sysreg_restore_state_nvhe(host_ctxt);

        if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
                __fpsimd_save_fpexc32(vcpu);

        /*
         * This must come after restoring the host sysregs, since a non-VHE
         * system may enable SPE here and make use of the TTBRs.
         */
        __debug_switch_to_host(vcpu);

        if (pmu_switch_needed)
                __pmu_switch_to_host(host_ctxt);

        /* Returning to host will clear PSR.I, remask PMR if needed */
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQOFF);

        return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

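/* Restore the host context before panicking from a non-VHE hypervisor */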
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
                                             struct kvm_cpu_context *__host_ctxt)
{
        struct kvm_vcpu *vcpu;
        unsigned long str_va;

        vcpu = __host_ctxt->__hyp_running_vcpu;

        if (read_sysreg(vttbr_el2)) {
                __timer_disable_traps(vcpu);
                __deactivate_traps(vcpu);
                __deactivate_vm(vcpu);
                __sysreg_restore_state_nvhe(__host_ctxt);
        }

        /*
         * Force the panic string to be loaded from the literal pool,
         * making sure it is a kernel address and not a PC-relative
         * reference.
         */
        asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

        __hyp_do_panic(str_va,
                       spsr, elr,
                       read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR),
                       read_sysreg(hpfar_el2), par, vcpu);
}

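/* On VHE the host context is directly accessible, so restore it and panic() */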
static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
                                 struct kvm_cpu_context *host_ctxt)
{
        struct kvm_vcpu *vcpu;
        vcpu = host_ctxt->__hyp_running_vcpu;

        __deactivate_traps(vcpu);
        sysreg_restore_host_state_vhe(host_ctxt);

        panic(__hyp_panic_string,
              spsr,  elr,
              read_sysreg_el2(SYS_ESR),   read_sysreg_el2(SYS_FAR),
              read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic_vhe);

void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
        u64 spsr = read_sysreg_el2(SYS_SPSR);
        u64 elr = read_sysreg_el2(SYS_ELR);
        u64 par = read_sysreg(par_el1);

        if (!has_vhe())
                __hyp_call_panic_nvhe(spsr, elr, par, host_ctxt);
        else
                __hyp_call_panic_vhe(spsr, elr, par, host_ctxt);

        unreachable();
}