/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_perf_event.h>
#include <asm/sysreg.h>

#define __hyp_text __section(.hyp.text) notrace
static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE("and %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
	return v;
}

#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
static inline unsigned long __hyp_kern_va(unsigned long v)
{
	u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET;

	asm volatile(ALTERNATIVE("add %0, %0, %1",
				 "nop",
				 ARM64_HAS_VIRT_HOST_EXTN)
		     : "+r" (v) : "r" (offset));
	return v;
}

#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v)))
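/*
 * Illustrative sketch (not part of the original header): kern_hyp_va()
 * and hyp_kern_va() are assumed to be used when passing kernel pointers
 * into code running at EL2 and back again, e.g. in a hypothetical
 * caller:
 *
 *	struct kvm_cpu_context *host_ctxt;
 *
 *	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
 *	...
 *	str_va = hyp_kern_va((unsigned long)some_hyp_pointer);
 *
 * On non-VHE systems the patched instructions perform the mask/offset
 * arithmetic; with VHE (ARM64_HAS_VIRT_HOST_EXTN) they are replaced by
 * NOPs and the address is used unchanged.
 */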
#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
					 "mrs_s %0, " __stringify(r##vh),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
					 "msr_s " __stringify(r##vh) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)
/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding.
 */
#define read_sysreg_el2(r)						\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\
					 "mrs %0, " __stringify(r##_EL1),\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_el2(v,r)						\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\
					 "msr " __stringify(r##_EL1) ", %x0",\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)
#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
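/*
 * Illustrative sketch (assumed usage, not taken from this header): the
 * unified accessors are expected to be used by the world-switch code
 * roughly as follows, with the register name given without its "ELx"
 * suffix:
 *
 *	ctxt->gp_regs.spsr[KVM_SPSR_EL1] = read_sysreg_el1(spsr);
 *	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr);
 *
 * On non-VHE this expands to an access to spsr_EL1; with VHE it is
 * patched to the spsr_EL12 encoding so the guest's EL1 state is
 * reached from EL2.
 */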
/* The VHE specific system registers and their encoding */
#define sctlr_EL12              sys_reg(3, 5, 1, 0, 0)
#define cpacr_EL12              sys_reg(3, 5, 1, 0, 2)
#define ttbr0_EL12              sys_reg(3, 5, 2, 0, 0)
#define ttbr1_EL12              sys_reg(3, 5, 2, 0, 1)
#define tcr_EL12                sys_reg(3, 5, 2, 0, 2)
#define afsr0_EL12              sys_reg(3, 5, 5, 1, 0)
#define afsr1_EL12              sys_reg(3, 5, 5, 1, 1)
#define esr_EL12                sys_reg(3, 5, 5, 2, 0)
#define far_EL12                sys_reg(3, 5, 6, 0, 0)
#define mair_EL12               sys_reg(3, 5, 10, 2, 0)
#define amair_EL12              sys_reg(3, 5, 10, 3, 0)
#define vbar_EL12               sys_reg(3, 5, 12, 0, 0)
#define contextidr_EL12         sys_reg(3, 5, 13, 0, 1)
#define cntkctl_EL12            sys_reg(3, 5, 14, 1, 0)
#define cntp_tval_EL02          sys_reg(3, 5, 14, 2, 0)
#define cntp_ctl_EL02           sys_reg(3, 5, 14, 2, 1)
#define cntp_cval_EL02          sys_reg(3, 5, 14, 2, 2)
#define cntv_tval_EL02          sys_reg(3, 5, 14, 3, 0)
#define cntv_ctl_EL02           sys_reg(3, 5, 14, 3, 1)
#define cntv_cval_EL02          sys_reg(3, 5, 14, 3, 2)
#define spsr_EL12               sys_reg(3, 5, 4, 0, 0)
#define elr_EL12                sys_reg(3, 5, 4, 0, 1)
/**
 * hyp_alternate_select - Generates patchable code sequences that are
 * used to switch between two implementations of a function, depending
 * on the availability of a feature.
 *
 * @fname: a symbol name that will be defined as a function returning a
 * function pointer whose type will match @orig and @alt
 * @orig: A pointer to the default function, as returned by @fname when
 * @cond doesn't hold
 * @alt: A pointer to the alternate function, as returned by @fname
 * when @cond holds
 * @cond: a CPU feature (as described in asm/cpufeature.h)
 */
#define hyp_alternate_select(fname, orig, alt, cond)			\
typeof(orig) * __hyp_text fname(void)					\
{									\
	typeof(alt) *val = orig;					\
	asm volatile(ALTERNATIVE("nop		\n",			\
				 "mov	%0, %1	\n",			\
				 cond)					\
		     : "+r" (val) : "r" (alt));				\
	return val;							\
}
void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);

void __timer_save_state(struct kvm_vcpu *vcpu);
void __timer_restore_state(struct kvm_vcpu *vcpu);

void __sysreg_save_host_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt);
void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt);
void __sysreg32_save_state(struct kvm_vcpu *vcpu);
void __sysreg32_restore_state(struct kvm_vcpu *vcpu);

void __debug_save_state(struct kvm_vcpu *vcpu,
			struct kvm_guest_debug_arch *dbg,
			struct kvm_cpu_context *ctxt);
void __debug_restore_state(struct kvm_vcpu *vcpu,
			   struct kvm_guest_debug_arch *dbg,
			   struct kvm_cpu_context *ctxt);
void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);

void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
bool __fpsimd_enabled(void);

u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
void __noreturn __hyp_do_panic(unsigned long, ...);

#endif /* __ARM64_KVM_HYP_H__ */