/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
	.text
	.pushsection	.hyp.text, "ax"
.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm
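
/*
 * For example: kvm_call_hyp(fn, a, b, c) on a VHE host lands in
 * __vhe_hyp_call below with the address of fn in x0 and a, b, c in
 * x1-x3; after the shuffle, fn runs with its arguments in x0-x2, as
 * for a normal AAPCS64 call.
 */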
ENTRY(__vhe_hyp_call)
	do_el2_call
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * Rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)
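
/*
 * "E2H" above refers to HCR_EL2.E2H, the VHE control bit: with VHE the
 * host kernel itself runs at EL2, so a hyp call is a plain function
 * call and returns with ret rather than eret. Since eret is a context
 * synchronization event and ret is not, the isb has to be explicit.
 */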
el1_sync:				// Guest trapped into EL2

	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap
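
	/*
	 * The ccmp above chains the two comparisons: if the first cmp
	 * did not match ("ne"), compare against HVC32; otherwise force
	 * NZCV to #4 (Z set), preserving the "equal" result. The b.ne
	 * is thus taken only if the EC is neither HVC64 nor HVC32.
	 */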

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f
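
	/*
	 * Stub hypercalls (HVC_SOFT_RESTART, HVC_RESET_VECTORS, ...)
	 * use small function IDs below HVC_STUB_HCALL_NR, while a
	 * function-call HVC from the host passes a pointer in x0,
	 * which is never that small.
	 */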
	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
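
	/*
	 * kimage_voffset is the offset between the kernel image's
	 * virtual and physical addresses (VA = PA + kimage_voffset);
	 * subtracting it from the constant-pool VA below therefore
	 * yields the physical address, which the idmap maps 1:1.
	 */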
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call
	eret

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue
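
	/*
	 * Comparison by eor: ARM_SMCCC_ARCH_WORKAROUND_1 (0x80008000)
	 * encodes as a logical immediate but not as a cmp immediate,
	 * so eor + cbz tests for it in two instructions, where cmp
	 * would first need the constant built in a register.
	 */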

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbnz	w1, el1_trap
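
	/*
	 * w1 held guest_x0 ^ WORKAROUND_1; xoring that with
	 * (WORKAROUND_1 ^ WORKAROUND_2) leaves guest_x0 ^ WORKAROUND_2,
	 * which is zero exactly when the guest asked for WORKAROUND_2.
	 */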

#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]
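
	/*
	 * The clz trick, worked through: clz returns 32 for w1 == 0 and
	 * at most 31 otherwise, so after lsr #5 the register holds the
	 * value of "w1 == 0"; the final eor #1 inverts it. E.g. w1 = 5:
	 * clz = 29, lsr #5 = 0, eor #1 = 1 = !!5.
	 */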

	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif

wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret

el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_error:
	ldp	x0, x1, [sp], #16

	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temps here.
	 * For (2), who cares?
	 */
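
	/*
	 * The check below compares ELR_EL2 against the two addresses
	 * delimiting the abort window armed on guest exit (the stretch
	 * where PSTATE.A is briefly unmasked); any other address means
	 * case (2).
	 */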
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret

ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
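	/*
	 * The eret above does not return to a caller: spsr_el2 supplies
	 * the target PSTATE (EL1h, all of DAIF masked) and elr_el2 the
	 * target PC, so execution resumes in panic() at EL1.
	 */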
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	get_host_ctxt	x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)
.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

.macro valid_vect target
	.align 7
	stp	x0, x1, [sp, #-16]!
	b	\target
.endm

.macro invalid_vect target
	.align 7
	b	\target
	ldp	x0, x1, [sp], #16
	b	\target
.endm
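
/*
 * The ldp/b tail matters for the indirect vectors below: the patched
 * branch enters a vector 4 bytes in, after the hardening sequence has
 * already pushed x0/x1, so an invalid vector must pop them again
 * before branching off to the panic path.
 */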

ENTRY(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	invalid_vect	el2h_sync_invalid	// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
	.align 7
1:	.rept 27
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	b	__kvm_hyp_vector + (1b - 0b)
	nop
	nop
	nop
	nop
alternative_cb_end
.endm
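
/*
 * Worked example with a hypothetical HYP VA: for
 * addr = 0x0000800010013804, kvm_patch_vector_branch would emit
 *
 *	movz	x0, #0x3804
 *	movk	x0, #0x1001, lsl #16
 *	movk	x0, #0x8000, lsl #32
 *
 * HYP VAs have their top bits masked off, so one movz plus two movk
 * (48 bits) are enough to materialize the full address.
 */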

.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm
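
/*
 * 16 vectors of 128 bytes each fill exactly SZ_2K; the .org turns any
 * overflow of a slot into an assembly-time error rather than silent
 * corruption of the next one.
 */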

	.align	11
ENTRY(__bp_harden_hyp_vecs_start)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
ENTRY(__bp_harden_hyp_vecs_end)

	.popsection
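
/*
 * Template for the ARM_SMCCC_ARCH_WORKAROUND_1 call, copied at boot
 * into the branch-predictor hardening vector slots: it saves x0-x3
 * around the SMC (which may clobber them), then asks the firmware to
 * run its mitigation.
 */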

ENTRY(__smccc_workaround_1_smc_start)
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)

#endif