/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
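/*
 * FP/SIMD traps are left enabled when entering the guest and are only
 * cleared by the lazy FP switch on the guest's first FP/SIMD access.
 * After a run, these helpers report whether that happened, i.e. whether
 * the guest's FP/SIMD state is live in the registers and needs saving.
 */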
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}
/* Save the 32-bit only FPSIMD system register state */
static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
}
static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1.  Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}
static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}
static void __hyp_text __deactivate_traps_common(void)
{
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}
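/*
 * On VHE the host runs at EL2, so the guest's trap configuration is
 * expressed through CPACR_EL1: trace register accesses are trapped, and
 * FP/SIMD and SVE are trapped by clearing FPEN/ZEN so that the lazy FP
 * switch can re-enable them on first use by the guest.
 */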
static void activate_traps_vhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP | CPTR_EL2_TZ;
	write_sysreg(val, cptr_el2);
}
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

	__activate_traps_fpsimd32(vcpu);
	if (has_vhe())
		activate_traps_vhe(vcpu);
	else
		__activate_traps_nvhe(vcpu);
}
static void deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}
static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	__deactivate_traps_common();

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	if (has_vhe())
		deactivate_traps_vhe();
	else
		__deactivate_traps_nvhe();
}
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
	__activate_traps_common(vcpu);
}
void deactivate_traps_vhe_put(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);

	__deactivate_traps_common();
}
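/*
 * vttbr_el2 carries both the stage-2 page table base address and the
 * VMID, so the single write below is all it takes to point stage-2
 * translation at the guest (and writing zero detaches it again).
 */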
static void __hyp_text __activate_vm(struct kvm *kvm)
{
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}
/* Save VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(vcpu);
		__vgic_v3_restore_state(vcpu);
	}
}
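/*
 * Cortex-A57 erratum 834220 can cause a stage-2 fault to be reported
 * with unreliable fault information. hyp_alternate_select() patches
 * __check_arm_834220() at boot so that it returns true only on affected
 * CPUs, in which case __populate_fault_info() recomputes the IPA with
 * an AT instruction instead of trusting HPFAR_EL2.
 */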
static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}
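/*
 * Snapshot the fault syndrome and fault addresses while still at EL2:
 * the host's exit handler runs much later (and, without VHE, back at
 * EL1), by which point FAR_EL2/HPFAR_EL2 may have been overwritten or
 * be out of reach.
 */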
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;
	u64 hpfar, far;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases are true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries errata 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}
/* Skip an instruction which has been emulated. Returns true if
 * execution can continue or false if we need to exit hyp mode because
 * single-step was in effect.
 */
static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		vcpu->arch.fault.esr_el2 =
			(ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
		return false;
	} else {
		return true;
	}
}
/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr);

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    *exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				if (__skip_instr(vcpu))
					return true;
				else
					*exit_code = ARM_EXCEPTION_TRAP;
			}

			if (ret == -1) {
				/* Promote an illegal access to an
				 * SError. If we would be returning
				 * due to single-step clear the SS
				 * bit so handle_exit knows what to
				 * do after dealing with the error.
				 */
				if (!__skip_instr(vcpu))
					*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
			}
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    *exit_code == ARM_EXCEPTION_TRAP &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1) {
			if (__skip_instr(vcpu))
				return true;
			else
				*exit_code = ARM_EXCEPTION_TRAP;
		}
	}

	/* Return to the host kernel and handle the exit */
	return false;
}
/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	host_ctxt = vcpu->arch.host_cpu_context;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	__activate_traps(vcpu);
	__activate_vm(vcpu->kvm);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	fp_enabled = fpsimd_enabled_vhe();

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
		__fpsimd_save_fpexc32(vcpu);
	}

	__debug_switch_to_host(vcpu);

	return exit_code;
}
/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_state_nvhe(host_ctxt);

	__activate_traps(vcpu);
	__activate_vm(kern_hyp_va(vcpu->kvm));

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	fp_enabled = __fpsimd_enabled_nvhe();

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_state_nvhe(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
		__fpsimd_save_fpexc32(vcpu);
	}

	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_switch_to_host(vcpu);

	return exit_code;
}
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_cpu_context *__host_ctxt)
{
	struct kvm_vcpu *vcpu;
	unsigned long str_va;

	vcpu = __host_ctxt->__hyp_running_vcpu;

	if (read_sysreg(vttbr_el2)) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state_nvhe(__host_ctxt);
	}

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * location.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par, vcpu);
}
static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
				 struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu;
	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par, vcpu);
}
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (!has_vhe())
		__hyp_call_panic_nvhe(spsr, elr, par, host_ctxt);
	else
		__hyp_call_panic_vhe(spsr, elr, par, host_ctxt);

	unreachable();
}