/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

/* Save the 32-bit only FPSIMD system register state */
static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
}

static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1.  Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

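/* Trap configuration shared by the VHE and non-VHE switch paths */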
static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

static void __hyp_text __deactivate_traps_common(void)
{
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}

static void activate_traps_vhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_ZEN;
	if (update_fp_enabled(vcpu)) {
		if (vcpu_has_sve(vcpu))
			val |= CPACR_EL1_ZEN;
	} else {
		val &= ~CPACR_EL1_FPEN;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
NOKPROBE_SYMBOL(activate_traps_vhe);

static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);
}

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

	if (has_vhe())
		activate_traps_vhe(vcpu);
	else
		__activate_traps_nvhe(vcpu);
}

static void deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);

	/*
	 * ARM erratum 1165522 requires the actual execution of the above
	 * before we can switch to the EL2/EL0 translation regime used by
	 * the host.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));

	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}
NOKPROBE_SYMBOL(deactivate_traps_vhe);

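/*
 * Restore the host's view of MDCR_EL2: keep the host's HPMN value, hand the
 * SPE profiling buffer back to EL1, and drop the remaining hyp traps.
 */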
static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	__deactivate_traps_common();

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	if (has_vhe())
		deactivate_traps_vhe();
	else
		__deactivate_traps_nvhe();
}

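/*
 * On VHE, the common traps only need to change when a vcpu is loaded onto or
 * put from a physical CPU, not on every world switch; these two helpers are
 * the load/put counterparts used for that.
 */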
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
	__activate_traps_common(vcpu);
}

void deactivate_traps_vhe_put(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);

	__deactivate_traps_common();
}

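/* Install the guest's stage 2 translation */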
static void __hyp_text __activate_vm(struct kvm *kvm)
{
	__load_guest_stage2(kvm);
}

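/* Switch stage 2 back to the host by zeroing VTTBR_EL2 (VMID 0) */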
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(vcpu);
		__vgic_v3_restore_state(vcpu);
	}
}

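/*
 * __check_arm_834220() is patched at boot via hyp_alternate_select(): it
 * resolves to __true_value() on CPUs affected by erratum 834220 and to
 * __false_value() everywhere else.
 */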
static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(tmp);

	return true;
}

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;
	u64 hpfar, far;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases are true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries errata 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

/* Check for an FPSIMD/SVE trap and handle as appropriate */
static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
	bool vhe, sve_guest, sve_host;
	u8 hsr_ec;

	if (!system_supports_fpsimd())
		return false;

	if (system_supports_sve()) {
		sve_guest = vcpu_has_sve(vcpu);
		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
		vhe = true;
	} else {
		sve_guest = false;
		sve_host = false;
		vhe = has_vhe();
	}

	hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
	    hsr_ec != ESR_ELx_EC_SVE)
		return false;

	/* Don't handle SVE traps for non-SVE vcpus here: */
	if (!sve_guest)
		if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
			return false;

	/* Valid trap.  Switch the context: */
	if (vhe) {
		u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;

		if (sve_guest)
			reg |= CPACR_EL1_ZEN;

		write_sysreg(reg, cpacr_el1);
	} else {
		write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
			     cptr_el2);
	}

	isb();

	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		/*
		 * In the SVE case, VHE is assumed: it is enforced by
		 * Kconfig and kvm_arch_init().
		 */
		if (sve_host) {
			struct thread_struct *thread = container_of(
				vcpu->arch.host_fpsimd_state,
				struct thread_struct, uw.fpsimd_state);

			sve_save_state(sve_pffr(thread),
				       &vcpu->arch.host_fpsimd_state->fpsr);
		} else {
			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
		}

		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	if (sve_guest) {
		sve_load_state(vcpu_sve_pffr(vcpu),
			       &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
		write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
	} else {
		__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
	}

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
			     fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr);

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 * Similarly for trapped SVE accesses.
	 */
	if (__hyp_handle_fpsimd(vcpu))
		return true;

	if (!__populate_fault_info(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;

			goto exit;
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1)
			return true;
	}

exit:
	/* Return to the host kernel and handle the exit */
	return false;
}

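/*
 * Spectre-v4 (SSBD) handling: a guest may request, via the SMCCC
 * ARCH_WORKAROUND_2 interface, that the mitigation be turned off while it
 * runs; __needs_ssbd_off() is true when such a request is pending.
 */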
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_const_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}

/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = vcpu->arch.host_cpu_context;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	/*
	 * ARM erratum 1165522 requires us to configure both stage 1 and
	 * stage 2 translation for the guest context before we clear
	 * HCR_EL2.TGE.
	 *
	 * We have already configured the guest's stage 1 translation in
	 * kvm_vcpu_load_sysregs above.  We must now call __activate_vm
	 * before __activate_traps, because __activate_vm configures
	 * stage 2 translation, and __activate_traps clears HCR_EL2.TGE
	 * (among other things).
	 */
	__activate_vm(vcpu->kvm);
	__activate_traps(vcpu);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	return exit_code;
}
NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);

/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON);
		dsb(sy);
	}

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);

	__activate_vm(kern_hyp_va(vcpu->kvm));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_switch_to_host(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	return exit_code;
}

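/* Hyp panic handling: restore enough host context to report the failure */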
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_cpu_context *__host_ctxt)
{
	struct kvm_vcpu *vcpu;
	unsigned long str_va;

	vcpu = __host_ctxt->__hyp_running_vcpu;

	if (read_sysreg(vttbr_el2)) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state_nvhe(__host_ctxt);
	}

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * location.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par, vcpu);
}

static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
				 struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu;
	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic_vhe);

void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (!has_vhe())
		__hyp_call_panic_nvhe(spsr, elr, par, host_ctxt);
	else
		__hyp_call_panic_vhe(spsr, elr, par, host_ctxt);

	unreachable();
}