/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  //idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
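 *
 * For example, with 4k pages and VA_BITS = 48: if the idmap text
 * lives below BIT(47), then HYP_VA_MIN = 1 << 47 = 0x8000_0000_0000
 * and HYP_VA_MAX = 0xffff_ffff_ffff, i.e. HYP shadows the kernel's
 * linear map from the top half of that 48-bit space.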
 *
 * This of course assumes that the trampoline page exists within the
 * VA_BITS range. If it doesn't, then it means we're in the odd case
 * where the kernel idmap (as well as HYP) uses more levels than the
 * kernel runtime page tables (as seen when the kernel is configured
 * for 4k pages, 39bits VA, and yet memory lives just above that
 * limit, forcing the idmap to use 4 levels of page tables while the
 * kernel itself only uses 3). In this particular case, it doesn't
 * matter which side of VA_BITS we use, as we're guaranteed not to
 * conflict with anything.
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a kernel VA into a HYP VA.
 * reg: VA to be converted.
 *
 * The actual code generation takes place in kvm_update_va_mask, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask uses the
 * specific registers encoded in the instructions).
 */
.macro kern_hyp_va	reg
alternative_cb kvm_update_va_mask
	and	\reg, \reg, #1		/* mask with va_mask */
	ror	\reg, \reg, #1		/* rotate to the first tag bit */
	add	\reg, \reg, #0		/* insert the low 12 bits of the tag */
	add	\reg, \reg, #0, lsl 12	/* insert the top 12 bits of the tag */
	ror	\reg, \reg, #63		/* rotate back */
alternative_cb_end
.endm
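
/*
 * At boot, kvm_update_va_mask() rewrites the five instructions above
 * with the real mask and tag values; on VHE systems, where kernel VAs
 * are directly usable at EL2, the whole sequence is turned into NOPs.
 */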

#else

#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);

static inline unsigned long __kern_hyp_va(unsigned long v)
{
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
				    "ror %0, %0, #1\n"
				    "add %0, %0, #0\n"
				    "add %0, %0, #0, lsl 12\n"
				    "ror %0, %0, #63\n",
				    kvm_update_va_mask)
		     : "+r" (v));
	return v;
}

#define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
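
/*
 * For example, kvm_get_hyp_vector() below uses
 * kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector)) to turn the kernel
 * mapping of the EL2 vectors into its HYP VA equivalent.
 */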

/*
 * Obtain the PC-relative address of a kernel symbol
 * s: symbol
 *
 * The goal of this macro is to return a symbol's address based on a
 * PC-relative computation, as opposed to loading the VA from a
 * constant pool or something similar. This works well for HYP, as an
 * absolute VA is guaranteed to be wrong. Only use this if trying to
 * obtain the address of a symbol (i.e. not something you obtained by
 * following a pointer).
 */
#define hyp_symbol_addr(s)						\
	({								\
		typeof(s) *addr;					\
		asm("adrp	%0, %1\n"				\
		    "add	%0, %0, :lo12:%1\n"			\
		    : "=r" (addr) : "S" (&s));				\
		addr;							\
	})
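
/*
 * Purely illustrative use (hypothetical, any linker symbol would do):
 *
 *	extern char __hyp_text_start[];
 *	void *p = hyp_symbol_addr(__hyp_text_start);
 *
 * p is computed relative to the current PC, and thus stays correct
 * at HYP, where the symbol's link-time VA would be meaningless.
 */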

/*
 * We currently only support a 40bit IPA.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
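
/* A 40-bit IPA gives each guest a 1TB physical address space. */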

#include <asm/stage2_pgtable.h>

int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

#define kvm_mk_pmd(ptep)					\
	__pmd(__phys_to_pmd_val(__pa(ptep)) | PMD_TYPE_TABLE)
#define kvm_mk_pud(pmdp)					\
	__pud(__phys_to_pud_val(__pa(pmdp)) | PMD_TYPE_TABLE)
#define kvm_mk_pgd(pudp)					\
	__pgd(__phys_to_pgd_val(__pa(pudp)) | PUD_TYPE_TABLE)
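
/*
 * Each of the above builds a table descriptor for the next level up,
 * e.g. kvm_mk_pmd(ptep) wraps the physical address of a pte page
 * into a PMD entry of type table.
 */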

static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= PMD_S2_RDWR;
	return pmd;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
	pte_val(pte) &= ~PTE_S2_XN;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_S2_XN;
	return pmd;
}
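
/*
 * Note that the four helpers above operate on a value, not in place:
 * callers must write the returned pte/pmd back into the stage-2
 * table for the new permissions to take effect.
 */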

static inline void kvm_set_s2pte_readonly(pte_t *ptep)
{
	pteval_t old_pteval, pteval;

	pteval = READ_ONCE(pte_val(*ptep));
	do {
		old_pteval = pteval;
		pteval &= ~PTE_S2_RDWR;
		pteval |= PTE_S2_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);
}

static inline bool kvm_s2pte_readonly(pte_t *ptep)
{
	return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *ptep)
{
	return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
{
	kvm_set_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
{
	return kvm_s2pte_readonly((pte_t *)pmdp);
}

static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
{
	return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}

static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}
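
/*
 * A page-table page's refcount is elevated once per entry it holds,
 * so a count of 1 (the bare allocation reference) means the table is
 * empty and can be freed.
 */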

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)

#ifdef __PAGETABLE_PMD_FOLDED
#define hyp_pmd_table_empty(pmdp) (0)
#else
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#endif

#ifdef __PAGETABLE_PUD_FOLDED
#define hyp_pud_table_empty(pudp) (0)
#else
#define hyp_pud_table_empty(pudp) kvm_page_empty(pudp)
#endif

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	/* Both the MMU (M, bit 0) and the D-cache (C, bit 2) must be on */
	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}

static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	void *va = page_address(pfn_to_page(pfn));

	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	if (icache_is_aliasing()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
		void *va = page_address(pfn_to_page(pfn));

		invalidate_icache_range((unsigned long)va,
					(unsigned long)va + size);
	}
}

static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pte_page(pte);
		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
	}
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pmd_page(pmd);
		kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		struct page *page = pud_page(pud);
		kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
	}
}

#define kvm_virt_to_phys(x)		__pa_symbol(x)

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return __cpu_uses_extended_idmap_level();
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
	return idmap_ptrs_per_pgd;
}

/*
 * Can't use pgd_populate here, because the extended idmap adds an extra level
 * above CONFIG_PGTABLE_LEVELS (which is 2 or 3 if we're using the extended
 * idmap), and pgd_populate is only available if CONFIG_PGTABLE_LEVELS = 4.
 */
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start)
{
	int idmap_idx;
	u64 pgd_addr;

	/*
	 * Use the first entry to access the HYP mappings. It is
	 * guaranteed to be free, otherwise we wouldn't use an
	 * extended idmap.
	 */
	VM_BUG_ON(pgd_val(merged_hyp_pgd[0]));
	pgd_addr = __phys_to_pgd_val(__pa(hyp_pgd));
	merged_hyp_pgd[0] = __pgd(pgd_addr | PMD_TYPE_TABLE);

	/*
	 * Create another extended level entry that points to the boot HYP map,
	 * which contains an ID mapping of the HYP init code. We essentially
	 * merge the boot and runtime HYP maps by doing so, but they don't
	 * overlap anyway, so this is fine.
	 */
	idmap_idx = hyp_idmap_start >> VA_BITS;
	VM_BUG_ON(pgd_val(merged_hyp_pgd[idmap_idx]));
	pgd_addr = __phys_to_pgd_val(__pa(boot_hyp_pgd));
	merged_hyp_pgd[idmap_idx] = __pgd(pgd_addr | PMD_TYPE_TABLE);
}

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}
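
/*
 * ID_AA64MMFR1_EL1.VMIDBits reads 0b0010 when the CPU implements
 * 16-bit VMIDs and 0b0000 for the base 8 bits, hence the comparison
 * with 2 above.
 */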

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
 * EL2 vectors can be mapped and rerouted in a number of ways,
 * depending on the kernel configuration and CPU present:
 *
 * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
 *   hardening sequence is placed in one of the vector slots, which is
 *   executed before jumping to the real vectors.
 *
 * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
 *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
 *   hardening sequence is mapped next to the idmap page, and executed
 *   before jumping to the real vectors.
 *
 * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
 *   empty slot is selected, mapped next to the idmap page, and
 *   executed before jumping to the real vectors.
 *
 * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
 * VHE, as we don't have hypervisor-specific mappings. If the system
 * is VHE and yet selects this capability, it will be ignored.
 */
#include <asm/mmu.h>

extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;

static inline void *kvm_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
	int slot = -1;

	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
		vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
		slot = data->hyp_vectors_slot;
	}

	if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
		vect = __kvm_bp_vect_base;
		if (slot == -1)
			slot = __kvm_harden_el2_vector_slot;
	}

	if (slot != -1)
		vect += slot * SZ_2K;

	return vect;
}
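
/*
 * Slots are SZ_2K apart because a complete EL2 vector table is 2KB:
 * 16 vectors of 128 bytes each.
 */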

/*  This is only called on a !VHE system */
static inline int kvm_map_vectors(void)
{
	/*
	 * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
	 *
	 * !HBP + !HEL2 -> use direct vectors
	 *  HBP + !HEL2 -> use hardened vectors in place
	 * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
	 *  HBP +  HEL2 -> use hardened vectors and use exec mapping
	 */
	if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
	}

	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start);
		unsigned long size = (__bp_harden_hyp_vecs_end -
				      __bp_harden_hyp_vecs_start);

		/*
		 * Always allocate a spare vector slot, as we don't
		 * know yet which CPUs have a BP hardening slot that
		 * we can reuse.
		 */
		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
		return create_hyp_exec_mappings(vect_pa, size,
						&__kvm_bp_vect_base);
	}

	return 0;
}
#else
static inline void *kvm_get_hyp_vector(void)
{
	return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
}

static inline int kvm_map_vectors(void)
{
	return 0;
}
#endif

#ifdef CONFIG_ARM64_SSBD
DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

static inline int hyp_map_aux_data(void)
{
	int cpu, err;

	for_each_possible_cpu(cpu) {
		u64 *ptr;

		ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
		err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
		if (err)
			return err;
	}

	return 0;
}
#else
static inline int hyp_map_aux_data(void)
{
	return 0;
}
#endif

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

static inline bool kvm_cpu_has_cnp(void)
{
	return system_supports_cnp();
}

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */