/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/page.h>
/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define kern_hyp_va(kva)	(kva)
/* Contrary to arm64, there is no need to generate a PC-relative address */
#define hyp_symbol_addr(s)					\
	({							\
		typeof(s) *addr = &(s);				\
		addr;						\
	})
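/*
 * Usage sketch: since this is an identity wrapper, e.g.
 *	hyp_symbol_addr(__kvm_hyp_vector)
 * simply evaluates to &__kvm_hyp_vector; the statement-expression form
 * only mirrors arm64, where the macro must emit a PC-relative adrp/add
 * sequence instead.
 */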
/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
 * levels.
 */
#define KVM_MMU_CACHE_MIN_PAGES	2
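/*
 * In other words (interpretation): the MMU page cache must hold enough
 * pages to populate a complete stage2 walk below the preallocated first
 * level, i.e. one pmd table plus one pte table on the 3-level LPAE
 * layout.
 */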
#ifndef __ASSEMBLY__

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/kvm_hyp.h>
#include <asm/pgalloc.h>
#include <asm/stage2_pgtable.h>

/* Ensure compatibility with arm64 */
#define VA_BITS		32
int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
void free_hyp_pgds(void);
void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
#define kvm_mk_pmd(ptep)	__pmd(__pa(ptep) | PMD_TYPE_TABLE)
#define kvm_mk_pud(pmdp)	__pud(__pa(pmdp) | PMD_TYPE_TABLE)
#define kvm_mk_pgd(pudp)	({ BUILD_BUG(); 0; })
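/*
 * kvm_mk_pmd()/kvm_mk_pud() build "table" descriptors that link one
 * paging level to the next: the physical address of the lower-level
 * table ORed with the table descriptor type. There is no pud level on
 * 32-bit LPAE (it is folded into the pgd), so kvm_mk_pgd() can never
 * legitimately be used and triggers a build error instead.
 */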
static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= L_PTE_S2_RDWR;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= L_PMD_S2_RDWR;
	return pmd;
}

static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
	pte_val(pte) &= ~L_PTE_XN;
	return pte;
}

static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
{
	pmd_val(pmd) &= ~PMD_SECT_XN;
	return pmd;
}
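/*
 * Background for the read-only helpers below: L_PTE_S2_RDONLY sets only
 * the stage2 HAP read bit, while L_PTE_S2_RDWR sets both the read and
 * write bits (HAP[2:1]). Masking an entry with RDWR and comparing the
 * result against RDONLY therefore tests "readable but not writable".
 */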
static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
	pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_readonly(pte_t *pte)
{
	return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
}

static inline bool kvm_s2pte_exec(pte_t *pte)
{
	return !(pte_val(*pte) & L_PTE_XN);
}

static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
	pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
{
	return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
}

static inline bool kvm_s2pmd_exec(pmd_t *pmd)
{
	return !(pmd_val(*pmd) & PMD_SECT_XN);
}
static inline bool kvm_page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);

	return page_count(ptr_page) == 1;
}
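/*
 * This relies on a convention of the stage2/hyp table code: each entry
 * installed in a page-table page takes a reference on that page
 * (get_page), dropped again when the entry is cleared. A bare allocator
 * refcount of 1 therefore means the table holds no entries and may be
 * freed.
 */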
#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define kvm_pud_table_empty(kvm, pudp) false

#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
#define hyp_pud_table_empty(pudp) false

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
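/*
 * 0b101 below covers SCTLR.M (bit 0, MMU enable) and SCTLR.C (bit 2,
 * data cache enable): the guest's caches only count as enabled when
 * both are set.
 */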
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
}
static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
	/*
	 * Clean the dcache to the Point of Coherency.
	 *
	 * We need to do this through a kernel mapping (using the
	 * user-space mapping has proved to be the wrong
	 * solution). For that, we need to kmap one page at a time,
	 * and iterate over the range.
	 */

	VM_BUG_ON(size & ~PAGE_MASK);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}
}
static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
						  unsigned long size)
{
	u32 iclsz;

	/*
	 * If we are going to insert an instruction page and the icache is
	 * either VIPT or PIPT, there is a potential problem where the host
	 * (or another VM) may have used the same page as this guest, and we
	 * read incorrect data from the icache. If we're using a PIPT cache,
	 * we can invalidate just that page, but if we are using a VIPT cache
	 * we need to invalidate the entire icache - damn shame - as written
	 * in the ARM ARM (DDI 0406C.b - Page B3-1393).
	 *
	 * VIVT caches are tagged using both the ASID and the VMID and don't
	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
	 */

	VM_BUG_ON(size & ~PAGE_MASK);

	if (icache_is_vivt_asid_tagged())
		return;

	if (!icache_is_pipt()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
		return;
	}

	/* PIPT cache: invalidate the affected range by MVA. */

	/*
	 * CTR IminLine contains Log2 of the number of words in the
	 * cache line, so we can get the number of words as
	 * 2 << (IminLine - 1). To get the number of bytes, we
	 * multiply by 4 (the number of bytes in a 32-bit word), and
	 * get 4 << (IminLine).
	 */
	iclsz = 4 << (read_cpuid(CPUID_CACHETYPE) & 0xf);
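	/*
	 * Worked example: for 64-byte icache lines (16 words), IminLine
	 * is log2(16) = 4, giving iclsz = 4 << 4 = 64 bytes.
	 */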
	while (size) {
		void *va = kmap_atomic_pfn(pfn);
		void *end = va + PAGE_SIZE;
		void *addr = va;

		do {
			write_sysreg(addr, ICIMVAU);
			addr += iclsz;
		} while (addr < end);

		dsb(ishst);
		isb();

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}

	/* Check if we need to invalidate the BTB */
	if ((read_cpuid_ext(CPUID_EXT_MMFR1) >> 28) != 4) {
		write_sysreg(0, BPIALLIS);
		dsb(ishst);
		isb();
	}
}
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
	void *va = kmap_atomic(pte_page(pte));

	kvm_flush_dcache_to_poc(va, PAGE_SIZE);

	kunmap_atomic(va);
}
static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
	unsigned long size = PMD_SIZE;
	kvm_pfn_t pfn = pmd_pfn(pmd);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		pfn++;
		size -= PAGE_SIZE;

		kunmap_atomic(va);
	}
}

static inline void __kvm_flush_dcache_pud(pud_t pud)
{
	/* No pud level on 32-bit LPAE: nothing to flush. */
}
#define kvm_virt_to_phys(x)		virt_to_idmap((unsigned long)(x))

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
static inline bool __kvm_cpu_uses_extended_idmap(void)
{
	return false;
}

static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
{
	return PTRS_PER_PGD;
}

static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
				       pgd_t *hyp_pgd,
				       pgd_t *merged_hyp_pgd,
				       unsigned long hyp_idmap_start) { }
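/* The ARMv7 VTTBR has a fixed 8-bit VMID field, hence the constant below. */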
static inline unsigned int kvm_get_vmid_bits(void)
{
	return 8;
}
/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}
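/*
 * Hypothetical usage sketch (names are illustrative, not from this
 * file): a device emulation path that is not already inside an srcu
 * critical section would fetch guest memory with, e.g.:
 *
 *	struct some_table entry;
 *	int ret = kvm_read_guest_lock(kvm, gpa, &entry, sizeof(entry));
 *
 * rather than calling kvm_read_guest() directly, which requires the
 * caller to hold the srcu read lock around the memslot lookup.
 */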
static inline void *kvm_get_hyp_vector(void)
{
	switch(read_cpuid_part()) {
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	case ARM_CPU_PART_CORTEX_A12:
	case ARM_CPU_PART_CORTEX_A17:
	{
		extern char __kvm_hyp_vector_bp_inv[];
		return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
	}

	case ARM_CPU_PART_BRAHMA_B15:
	case ARM_CPU_PART_CORTEX_A15:
	{
		extern char __kvm_hyp_vector_ic_inv[];
		return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
	}
#endif
	default:
	{
		extern char __kvm_hyp_vector[];
		return kvm_ksym_ref(__kvm_hyp_vector);
	}
	}
}
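/*
 * Selection rationale (as understood from the Spectre-v2 hardening
 * series): the _bp_inv vectors invalidate the branch predictor (BPIALL)
 * on guest exit for Cortex-A12/A17, while the _ic_inv vectors
 * invalidate the entire icache (ICIALLU) on cores such as Cortex-A15
 * where BPIALL is a no-op; everything else uses the plain vectors.
 */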
static inline int kvm_map_vectors(void)
{
	return 0;
}

static inline int hyp_map_aux_data(void)
{
	return 0;
}

#define kvm_phys_to_vttbr(addr)		(addr)

static inline bool kvm_cpu_has_cnp(void)
{
	return false;
}
#endif	/* !__ASSEMBLY__ */

#endif /* __ARM_KVM_MMU_H__ */