/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm

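/*
 * This is the calling convention used by the host side: __kvm_call_hyp
 * issues an HVC with the target function's pointer in x0 and up to three
 * arguments in x1-x3, which do_el2_call turns into an ordinary call.
 */
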
el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap
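	/*
	 * The cmp/ccmp pair left the Z flag set only when the exception
	 * class was HVC64 or HVC32; any other trap has just taken the
	 * el1_trap path.
	 */
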
	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC
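	/*
	 * VTTBR_EL2 is only non-zero while a guest's stage-2 tables are
	 * installed; the host runs with it cleared, so a zero value means
	 * the HVC came from the host.
	 */
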
	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
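	/*
	 * kimage_voffset is the offset between the kernel's virtual and
	 * physical addresses (VA = PA + kimage_voffset), so subtracting
	 * it turns the kernel VA into the physical address, which is
	 * also its idmap VA.
	 */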
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call
	eret

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
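	// w1 is now zero iff the guest's x0 was exactly
	// ARM_SMCCC_ARCH_WORKAROUND_1.
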
	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbnz	w1, el1_trap
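	/*
	 * Having already XORed in WORKAROUND_1 above, XORing with
	 * (WORKAROUND_1 ^ WORKAROUND_2) leaves w1 == 0 exactly when the
	 * original request was ARM_SMCCC_ARCH_WORKAROUND_2; anything
	 * else is treated as an ordinary guest trap.
	 */
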
#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]
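	/*
	 * clz returns 32 for w1 == 0 and a value in 0..31 otherwise, so
	 * shifting right by 5 and flipping the low bit computes !!w1
	 * without touching the condition flags; bfi then inserts that
	 * single bit as the vcpu's workaround-2 state.
	 */
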
	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end
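	/*
	 * arm64_ssbd_callback_required is a per-CPU flag: it is only set
	 * on CPUs that need the firmware call to toggle the Speculative
	 * Store Bypass mitigation.
	 */
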
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0
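	/*
	 * The SMC traps to EL3 firmware, which applies or lifts the
	 * mitigation according to the sanitized value in w1.
	 */
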
	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif

el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit
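	/*
	 * Each handler above hands the exit code in x0 and the vcpu
	 * pointer in x1 to the common __guest_exit path, which returns
	 * the code to the host's run loop.
	 */
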
el2_sync:
	/* Check for illegal exception return, otherwise panic */
	mrs	x0, spsr_el2

	/* if this was something else, then panic! */
	tst	x0, #PSR_IL_BIT
	b.eq	__hyp_panic

	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit

el2_error:
	ldp	x0, x1, [sp], #16

	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
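	/*
	 * ELR_EL2 pointing into [abort_guest_exit_start, abort_guest_exit_end]
	 * means the SError was taken in the deliberate abort-probing
	 * window of the guest-exit path, so it is safe to resume there.
	 */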
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret

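/*
 * __hyp_do_panic fakes an exception return: SPSR_EL2 selects EL1h with
 * all of DAIF masked and ELR_EL2 points at panic(), so the eret below
 * drops straight into the kernel's panic path.
 */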
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

.macro valid_vect target
	.align 7
	stp	x0, x1, [sp, #-16]!
	b	\target
.endm

.macro invalid_vect target
	.align 7
	b	\target
	ldp	x0, x1, [sp], #16
	b	\target
.endm
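/*
 * When the vectors are remapped for ARM64_HARDEN_EL2_VECTORS, the patched
 * preamble (see hyp_ventry below) has already pushed x0/x1 and branches to
 * the slot + 4: in a valid_vect slot that skips the duplicate stp, and in
 * an invalid_vect slot it lands on the ldp that undoes the push.
 */
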
ENTRY(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
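/*
 * The four groups above follow the architectural VBAR_EL2 layout: sixteen
 * 128-byte slots ordered Current-EL-with-SP0, Current-EL-with-SPx,
 * Lower-EL-AArch64 and Lower-EL-AArch32, each with Synchronous, IRQ, FIQ
 * and SError entries.
 */
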
#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
	.align 7
1:	.rept 27
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	b	__kvm_hyp_vector + (1b - 0b)
	nop
	nop
	nop
	nop
alternative_cb_end
.endm

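/*
 * generate_vectors below emits sixteen hyp_ventry slots (one per vector
 * entry) and pads the result to 2KB; the .rept further down then stamps
 * out one such vector page per BP_HARDEN_EL2_SLOTS hardening slot.
 */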
.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
ENTRY(__bp_harden_hyp_vecs_start)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
ENTRY(__bp_harden_hyp_vecs_end)

	.popsection

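/*
 * The sequence between __smccc_workaround_1_smc_start and _end is not
 * called in place: the branch-predictor hardening code uses it as a
 * template that is copied into the hardened vector slots above when
 * ARM_SMCCC_ARCH_WORKAROUND_1 is the selected mitigation.
 */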
ENTRY(__smccc_workaround_1_smc_start)
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
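	/*
	 * SMCCC allows the firmware call to clobber x0-x3, hence the
	 * save and restore of the guest's registers around the SMC.
	 */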
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
#endif