2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 #include <linux/mman.h>
20 #include <linux/kvm_host.h>
22 #include <linux/hugetlb.h>
23 #include <linux/sched/signal.h>
24 #include <trace/events/kvm.h>
25 #include <asm/pgalloc.h>
26 #include <asm/cacheflush.h>
27 #include <asm/kvm_arm.h>
28 #include <asm/kvm_mmu.h>
29 #include <asm/kvm_mmio.h>
30 #include <asm/kvm_ras.h>
31 #include <asm/kvm_asm.h>
32 #include <asm/kvm_emulate.h>
37 static pgd_t *boot_hyp_pgd;
38 static pgd_t *hyp_pgd;
39 static pgd_t *merged_hyp_pgd;
40 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
42 static unsigned long hyp_idmap_start;
43 static unsigned long hyp_idmap_end;
44 static phys_addr_t hyp_idmap_vector;
46 static unsigned long io_map_base;
48 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
50 #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
51 #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
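/*
 * Returns true if dirty page logging is enabled for this memslot, i.e. a
 * dirty bitmap has been allocated and the slot is not read-only.
 */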
53 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
55 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
59 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
60 * @kvm: pointer to kvm structure.
62 * Interface to HYP function to flush all VM TLB entries
64 void kvm_flush_remote_tlbs(struct kvm *kvm)
66 kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
69 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
71 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
75 * D-Cache management functions. They take the page table entries by
76 * value, as they are flushing the cache using the kernel mapping (or kmap on 32bit).
79 static void kvm_flush_dcache_pte(pte_t pte)
81 __kvm_flush_dcache_pte(pte);
84 static void kvm_flush_dcache_pmd(pmd_t pmd)
86 __kvm_flush_dcache_pmd(pmd);
89 static void kvm_flush_dcache_pud(pud_t pud)
91 __kvm_flush_dcache_pud(pud);
94 static bool kvm_is_device_pfn(unsigned long pfn)
96 return !pfn_valid(pfn);
100 * stage2_dissolve_pmd() - clear and flush huge PMD entry
101 * @kvm: pointer to kvm structure.
103 * @pmd: pmd pointer for IPA
105 * Function clears a PMD entry, flushes addr 1st and 2nd stage TLBs. Marks all
106 * pages in the range dirty.
108 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
110 if (!pmd_thp_or_huge(*pmd))
114 kvm_tlb_flush_vmid_ipa(kvm, addr);
115 put_page(virt_to_page(pmd));
119 * stage2_dissolve_pud() - clear and flush huge PUD entry
120 * @kvm: pointer to kvm structure.
122 * @pud: pud pointer for IPA
124 * Function clears a PUD entry, flushes addr 1st and 2nd stage TLBs. Marks all
125 * pages in the range dirty.
127 static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
129 if (!stage2_pud_huge(kvm, *pudp))
132 stage2_pud_clear(kvm, pudp);
133 kvm_tlb_flush_vmid_ipa(kvm, addr);
134 put_page(virt_to_page(pudp));
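/*
 * Pre-fill the page-table object cache so that later stage-2 table
 * allocations can be satisfied without sleeping while mmu_lock is held.
 */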
137 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
142 BUG_ON(max > KVM_NR_MEM_OBJS);
143 if (cache->nobjs >= min)
145 while (cache->nobjs < max) {
146 page = (void *)__get_free_page(PGALLOC_GFP);
149 cache->objects[cache->nobjs++] = page;
154 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
157 free_page((unsigned long)mc->objects[--mc->nobjs]);
160 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
164 BUG_ON(!mc || !mc->nobjs);
165 p = mc->objects[--mc->nobjs];
169 static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
171 pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
172 stage2_pgd_clear(kvm, pgd);
173 kvm_tlb_flush_vmid_ipa(kvm, addr);
174 stage2_pud_free(kvm, pud_table);
175 put_page(virt_to_page(pgd));
178 static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
180 pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
181 VM_BUG_ON(stage2_pud_huge(kvm, *pud));
182 stage2_pud_clear(kvm, pud);
183 kvm_tlb_flush_vmid_ipa(kvm, addr);
184 stage2_pmd_free(kvm, pmd_table);
185 put_page(virt_to_page(pud));
188 static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
190 pte_t *pte_table = pte_offset_kernel(pmd, 0);
191 VM_BUG_ON(pmd_thp_or_huge(*pmd));
193 kvm_tlb_flush_vmid_ipa(kvm, addr);
194 pte_free_kernel(NULL, pte_table);
195 put_page(virt_to_page(pmd));
198 static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
200 WRITE_ONCE(*ptep, new_pte);
204 static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
206 WRITE_ONCE(*pmdp, new_pmd);
210 static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep)
212 kvm_set_pmd(pmdp, kvm_mk_pmd(ptep));
215 static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
217 WRITE_ONCE(*pudp, kvm_mk_pud(pmdp));
221 static inline void kvm_pgd_populate(pgd_t *pgdp, pud_t *pudp)
223 WRITE_ONCE(*pgdp, kvm_mk_pgd(pudp));
228 * Unmapping vs dcache management:
230 * If a guest maps certain memory pages as uncached, all writes will
231 * bypass the data cache and go directly to RAM. However, the CPUs
232 * can still speculate reads (not writes) and fill cache lines with
235 * Those cache lines will be *clean* cache lines though, so a
236 * clean+invalidate operation is equivalent to an invalidate
237 * operation, because no cache lines are marked dirty.
239 * Those clean cache lines could be filled prior to an uncached write
240 * by the guest, and the cache coherent IO subsystem would therefore
241 * end up writing old data to disk.
243 * This is why right after unmapping a page/section and invalidating
244 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
245 * the IO subsystem will never hit in the cache.
247 * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
248 * we then fully enforce cacheability of RAM, no matter what the guest does.
251 static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
252 phys_addr_t addr, phys_addr_t end)
254 phys_addr_t start_addr = addr;
255 pte_t *pte, *start_pte;
257 start_pte = pte = pte_offset_kernel(pmd, addr);
259 if (!pte_none(*pte)) {
260 pte_t old_pte = *pte;
262 kvm_set_pte(pte, __pte(0));
263 kvm_tlb_flush_vmid_ipa(kvm, addr);
265 /* No need to invalidate the cache for device mappings */
266 if (!kvm_is_device_pfn(pte_pfn(old_pte)))
267 kvm_flush_dcache_pte(old_pte);
269 put_page(virt_to_page(pte));
271 } while (pte++, addr += PAGE_SIZE, addr != end);
273 if (stage2_pte_table_empty(kvm, start_pte))
274 clear_stage2_pmd_entry(kvm, pmd, start_addr);
277 static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
278 phys_addr_t addr, phys_addr_t end)
280 phys_addr_t next, start_addr = addr;
281 pmd_t *pmd, *start_pmd;
283 start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
285 next = stage2_pmd_addr_end(kvm, addr, end);
286 if (!pmd_none(*pmd)) {
287 if (pmd_thp_or_huge(*pmd)) {
288 pmd_t old_pmd = *pmd;
291 kvm_tlb_flush_vmid_ipa(kvm, addr);
293 kvm_flush_dcache_pmd(old_pmd);
295 put_page(virt_to_page(pmd));
297 unmap_stage2_ptes(kvm, pmd, addr, next);
300 } while (pmd++, addr = next, addr != end);
302 if (stage2_pmd_table_empty(kvm, start_pmd))
303 clear_stage2_pud_entry(kvm, pud, start_addr);
306 static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
307 phys_addr_t addr, phys_addr_t end)
309 phys_addr_t next, start_addr = addr;
310 pud_t *pud, *start_pud;
312 start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
314 next = stage2_pud_addr_end(kvm, addr, end);
315 if (!stage2_pud_none(kvm, *pud)) {
316 if (stage2_pud_huge(kvm, *pud)) {
317 pud_t old_pud = *pud;
319 stage2_pud_clear(kvm, pud);
320 kvm_tlb_flush_vmid_ipa(kvm, addr);
321 kvm_flush_dcache_pud(old_pud);
322 put_page(virt_to_page(pud));
324 unmap_stage2_pmds(kvm, pud, addr, next);
327 } while (pud++, addr = next, addr != end);
329 if (stage2_pud_table_empty(kvm, start_pud))
330 clear_stage2_pgd_entry(kvm, pgd, start_addr);
334 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
335 * @kvm: The VM pointer
336 * @start: The intermediate physical base address of the range to unmap
337 * @size: The size of the area to unmap
339 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
340 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
341 * destroying the VM), otherwise another faulting VCPU may come in and mess
342 * with things behind our backs.
344 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
347 phys_addr_t addr = start, end = start + size;
350 assert_spin_locked(&kvm->mmu_lock);
351 WARN_ON(size & ~PAGE_MASK);
353 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
356 * Make sure the page table is still active, as another thread
357 * could have possibly freed the page table, while we released the lock.
360 if (!READ_ONCE(kvm->arch.pgd))
362 next = stage2_pgd_addr_end(kvm, addr, end);
363 if (!stage2_pgd_none(kvm, *pgd))
364 unmap_stage2_puds(kvm, pgd, addr, next);
366 * If the range is too large, release the kvm->mmu_lock
367 * to prevent starvation and lockup detector warnings.
370 cond_resched_lock(&kvm->mmu_lock);
371 } while (pgd++, addr = next, addr != end);
374 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
375 phys_addr_t addr, phys_addr_t end)
379 pte = pte_offset_kernel(pmd, addr);
381 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
382 kvm_flush_dcache_pte(*pte);
383 } while (pte++, addr += PAGE_SIZE, addr != end);
386 static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
387 phys_addr_t addr, phys_addr_t end)
392 pmd = stage2_pmd_offset(kvm, pud, addr);
394 next = stage2_pmd_addr_end(kvm, addr, end);
395 if (!pmd_none(*pmd)) {
396 if (pmd_thp_or_huge(*pmd))
397 kvm_flush_dcache_pmd(*pmd);
399 stage2_flush_ptes(kvm, pmd, addr, next);
401 } while (pmd++, addr = next, addr != end);
404 static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
405 phys_addr_t addr, phys_addr_t end)
410 pud = stage2_pud_offset(kvm, pgd, addr);
412 next = stage2_pud_addr_end(kvm, addr, end);
413 if (!stage2_pud_none(kvm, *pud)) {
414 if (stage2_pud_huge(kvm, *pud))
415 kvm_flush_dcache_pud(*pud);
417 stage2_flush_pmds(kvm, pud, addr, next);
419 } while (pud++, addr = next, addr != end);
422 static void stage2_flush_memslot(struct kvm *kvm,
423 struct kvm_memory_slot *memslot)
425 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
426 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
430 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
432 next = stage2_pgd_addr_end(kvm, addr, end);
433 if (!stage2_pgd_none(kvm, *pgd))
434 stage2_flush_puds(kvm, pgd, addr, next);
435 } while (pgd++, addr = next, addr != end);
439 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
440 * @kvm: The struct kvm pointer
442 * Go through the stage 2 page tables and invalidate any cache lines
443 * backing memory already mapped to the VM.
445 static void stage2_flush_vm(struct kvm *kvm)
447 struct kvm_memslots *slots;
448 struct kvm_memory_slot *memslot;
451 idx = srcu_read_lock(&kvm->srcu);
452 spin_lock(&kvm->mmu_lock);
454 slots = kvm_memslots(kvm);
455 kvm_for_each_memslot(memslot, slots)
456 stage2_flush_memslot(kvm, memslot);
458 spin_unlock(&kvm->mmu_lock);
459 srcu_read_unlock(&kvm->srcu, idx);
462 static void clear_hyp_pgd_entry(pgd_t *pgd)
464 pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
466 pud_free(NULL, pud_table);
467 put_page(virt_to_page(pgd));
470 static void clear_hyp_pud_entry(pud_t *pud)
472 pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
473 VM_BUG_ON(pud_huge(*pud));
475 pmd_free(NULL, pmd_table);
476 put_page(virt_to_page(pud));
479 static void clear_hyp_pmd_entry(pmd_t *pmd)
481 pte_t *pte_table = pte_offset_kernel(pmd, 0);
482 VM_BUG_ON(pmd_thp_or_huge(*pmd));
484 pte_free_kernel(NULL, pte_table);
485 put_page(virt_to_page(pmd));
488 static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
490 pte_t *pte, *start_pte;
492 start_pte = pte = pte_offset_kernel(pmd, addr);
494 if (!pte_none(*pte)) {
495 kvm_set_pte(pte, __pte(0));
496 put_page(virt_to_page(pte));
498 } while (pte++, addr += PAGE_SIZE, addr != end);
500 if (hyp_pte_table_empty(start_pte))
501 clear_hyp_pmd_entry(pmd);
504 static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
507 pmd_t *pmd, *start_pmd;
509 start_pmd = pmd = pmd_offset(pud, addr);
511 next = pmd_addr_end(addr, end);
512 /* Hyp doesn't use huge pmds */
514 unmap_hyp_ptes(pmd, addr, next);
515 } while (pmd++, addr = next, addr != end);
517 if (hyp_pmd_table_empty(start_pmd))
518 clear_hyp_pud_entry(pud);
521 static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
524 pud_t *pud, *start_pud;
526 start_pud = pud = pud_offset(pgd, addr);
528 next = pud_addr_end(addr, end);
529 /* Hyp doesn't use huge puds */
531 unmap_hyp_pmds(pud, addr, next);
532 } while (pud++, addr = next, addr != end);
534 if (hyp_pud_table_empty(start_pud))
535 clear_hyp_pgd_entry(pgd);
538 static unsigned int kvm_pgd_index(unsigned long addr, unsigned int ptrs_per_pgd)
540 return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
543 static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
544 phys_addr_t start, u64 size)
547 phys_addr_t addr = start, end = start + size;
551 * We don't unmap anything from HYP, except at the hyp tear down.
552 * Hence, we don't have to invalidate the TLBs here.
554 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
556 next = pgd_addr_end(addr, end);
558 unmap_hyp_puds(pgd, addr, next);
559 } while (pgd++, addr = next, addr != end);
562 static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
564 __unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size);
567 static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
569 __unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size);
573 * free_hyp_pgds - free Hyp-mode page tables
575 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
576 * therefore contains either mappings in the kernel memory area (above
577 * PAGE_OFFSET), or device mappings in the idmap range.
579 * boot_hyp_pgd should only map the idmap range, and is only used in
580 * the extended idmap case.
582 void free_hyp_pgds(void)
586 mutex_lock(&kvm_hyp_pgd_mutex);
588 id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;
591 /* In case we never called hyp_mmu_init() */
593 io_map_base = hyp_idmap_start;
594 unmap_hyp_idmap_range(id_pgd, io_map_base,
595 hyp_idmap_start + PAGE_SIZE - io_map_base);
599 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
604 unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
605 (uintptr_t)high_memory - PAGE_OFFSET);
607 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
610 if (merged_hyp_pgd) {
611 clear_page(merged_hyp_pgd);
612 free_page((unsigned long)merged_hyp_pgd);
613 merged_hyp_pgd = NULL;
616 mutex_unlock(&kvm_hyp_pgd_mutex);
619 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
620 unsigned long end, unsigned long pfn,
628 pte = pte_offset_kernel(pmd, addr);
629 kvm_set_pte(pte, kvm_pfn_pte(pfn, prot));
630 get_page(virt_to_page(pte));
632 } while (addr += PAGE_SIZE, addr != end);
635 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
636 unsigned long end, unsigned long pfn,
641 unsigned long addr, next;
645 pmd = pmd_offset(pud, addr);
647 BUG_ON(pmd_sect(*pmd));
649 if (pmd_none(*pmd)) {
650 pte = pte_alloc_one_kernel(NULL);
652 kvm_err("Cannot allocate Hyp pte\n");
655 kvm_pmd_populate(pmd, pte);
656 get_page(virt_to_page(pmd));
659 next = pmd_addr_end(addr, end);
661 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
662 pfn += (next - addr) >> PAGE_SHIFT;
663 } while (addr = next, addr != end);
668 static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
669 unsigned long end, unsigned long pfn,
674 unsigned long addr, next;
679 pud = pud_offset(pgd, addr);
681 if (pud_none_or_clear_bad(pud)) {
682 pmd = pmd_alloc_one(NULL, addr);
684 kvm_err("Cannot allocate Hyp pmd\n");
687 kvm_pud_populate(pud, pmd);
688 get_page(virt_to_page(pud));
691 next = pud_addr_end(addr, end);
692 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
695 pfn += (next - addr) >> PAGE_SHIFT;
696 } while (addr = next, addr != end);
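/*
 * Walk (and populate, where needed) the Hyp page tables under
 * kvm_hyp_pgd_mutex, mapping [start, end) to the given pfn range with the
 * requested protection.
 */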
701 static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
702 unsigned long start, unsigned long end,
703 unsigned long pfn, pgprot_t prot)
707 unsigned long addr, next;
710 mutex_lock(&kvm_hyp_pgd_mutex);
711 addr = start & PAGE_MASK;
712 end = PAGE_ALIGN(end);
714 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
716 if (pgd_none(*pgd)) {
717 pud = pud_alloc_one(NULL, addr);
719 kvm_err("Cannot allocate Hyp pud\n");
723 kvm_pgd_populate(pgd, pud);
724 get_page(virt_to_page(pgd));
727 next = pgd_addr_end(addr, end);
728 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
731 pfn += (next - addr) >> PAGE_SHIFT;
732 } while (addr = next, addr != end);
734 mutex_unlock(&kvm_hyp_pgd_mutex);
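/*
 * Translate a kernel virtual address to a physical address, handling both
 * linear-map and vmalloc addresses.
 */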
738 static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
740 if (!is_vmalloc_addr(kaddr)) {
741 BUG_ON(!virt_addr_valid(kaddr));
744 return page_to_phys(vmalloc_to_page(kaddr)) +
745 offset_in_page(kaddr);
750 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
751 * @from: The virtual kernel start address of the range
752 * @to: The virtual kernel end address of the range (exclusive)
753 * @prot: The protection to be applied to this range
755 * The same virtual address as the kernel virtual address is also used
756 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying physical pages.
759 int create_hyp_mappings(void *from, void *to, pgprot_t prot)
761 phys_addr_t phys_addr;
762 unsigned long virt_addr;
763 unsigned long start = kern_hyp_va((unsigned long)from);
764 unsigned long end = kern_hyp_va((unsigned long)to);
766 if (is_kernel_in_hyp_mode())
769 start = start & PAGE_MASK;
770 end = PAGE_ALIGN(end);
772 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
775 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
776 err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
777 virt_addr, virt_addr + PAGE_SIZE,
778 __phys_to_pfn(phys_addr),
787 static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
788 unsigned long *haddr, pgprot_t prot)
790 pgd_t *pgd = hyp_pgd;
794 mutex_lock(&kvm_hyp_pgd_mutex);
797 * This assumes that we have enough space below the idmap
798 * page to allocate our VAs. If not, the check below will
799 * kick. A potential alternative would be to detect that
800 * overflow and switch to an allocation above the idmap.
802 * The allocated size is always a multiple of PAGE_SIZE.
804 size = PAGE_ALIGN(size + offset_in_page(phys_addr));
805 base = io_map_base - size;
808 * Verify that BIT(VA_BITS - 1) hasn't been flipped by
809 * allocating the new area, as it would indicate we've
810 * overflowed the idmap/IO address range.
812 if ((base ^ io_map_base) & BIT(VA_BITS - 1))
817 mutex_unlock(&kvm_hyp_pgd_mutex);
822 if (__kvm_cpu_uses_extended_idmap())
825 ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
827 __phys_to_pfn(phys_addr), prot);
831 *haddr = base + offset_in_page(phys_addr);
838 * create_hyp_io_mappings - Map IO into both kernel and HYP
839 * @phys_addr: The physical start address which gets mapped
840 * @size: Size of the region being mapped
841 * @kaddr: Kernel VA for this mapping
842 * @haddr: HYP VA for this mapping
844 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
845 void __iomem **kaddr,
846 void __iomem **haddr)
851 *kaddr = ioremap(phys_addr, size);
855 if (is_kernel_in_hyp_mode()) {
860 ret = __create_hyp_private_mapping(phys_addr, size,
861 &addr, PAGE_HYP_DEVICE);
869 *haddr = (void __iomem *)addr;
874 * create_hyp_exec_mappings - Map an executable range into HYP
875 * @phys_addr: The physical start address which gets mapped
876 * @size: Size of the region being mapped
877 * @haddr: HYP VA for this mapping
879 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
885 BUG_ON(is_kernel_in_hyp_mode());
887 ret = __create_hyp_private_mapping(phys_addr, size,
888 &addr, PAGE_HYP_EXEC);
894 *haddr = (void *)addr;
899 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
900 * @kvm: The KVM struct pointer for the VM.
902 * Allocates only the stage-2 HW PGD level table(s) (can support either full
903 * 40-bit input addresses or limited to 32-bit input addresses). Clears the allocated pages.
906 * Note we don't need locking here as this is only called when the VM is
907 * created, which can only be done once.
909 int kvm_alloc_stage2_pgd(struct kvm *kvm)
911 phys_addr_t pgd_phys;
914 if (kvm->arch.pgd != NULL) {
915 kvm_err("kvm_arch already initialized?\n");
919 /* Allocate the HW PGD, making sure that each page gets its own refcount */
920 pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
924 pgd_phys = virt_to_phys(pgd);
925 if (WARN_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm)))
929 kvm->arch.pgd_phys = pgd_phys;
933 static void stage2_unmap_memslot(struct kvm *kvm,
934 struct kvm_memory_slot *memslot)
936 hva_t hva = memslot->userspace_addr;
937 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
938 phys_addr_t size = PAGE_SIZE * memslot->npages;
939 hva_t reg_end = hva + size;
942 * A memory region could potentially cover multiple VMAs, and any holes
943 * between them, so iterate over all of them to find out if we should unmap any of them.
946 * +--------------------------------------------+
947 * +---------------+----------------+ +----------------+
948 * | : VMA 1 | VMA 2 | | VMA 3 : |
949 * +---------------+----------------+ +----------------+
951 * +--------------------------------------------+
954 struct vm_area_struct *vma = find_vma(current->mm, hva);
955 hva_t vm_start, vm_end;
957 if (!vma || vma->vm_start >= reg_end)
961 * Take the intersection of this VMA with the memory region
963 vm_start = max(hva, vma->vm_start);
964 vm_end = min(reg_end, vma->vm_end);
966 if (!(vma->vm_flags & VM_PFNMAP)) {
967 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
968 unmap_stage2_range(kvm, gpa, vm_end - vm_start);
971 } while (hva < reg_end);
975 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
976 * @kvm: The struct kvm pointer
978 * Go through the memregions and unmap any regular RAM
979 * backing memory already mapped to the VM.
981 void stage2_unmap_vm(struct kvm *kvm)
983 struct kvm_memslots *slots;
984 struct kvm_memory_slot *memslot;
987 idx = srcu_read_lock(&kvm->srcu);
988 down_read(&current->mm->mmap_sem);
989 spin_lock(&kvm->mmu_lock);
991 slots = kvm_memslots(kvm);
992 kvm_for_each_memslot(memslot, slots)
993 stage2_unmap_memslot(kvm, memslot);
995 spin_unlock(&kvm->mmu_lock);
996 up_read(&current->mm->mmap_sem);
997 srcu_read_unlock(&kvm->srcu, idx);
1001 * kvm_free_stage2_pgd - free all stage-2 tables
1002 * @kvm: The KVM struct pointer for the VM.
1004 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
1005 * underlying level-2 and level-3 tables before freeing the actual level-1 table
1006 * and setting the struct pointer to NULL.
1008 void kvm_free_stage2_pgd(struct kvm *kvm)
1012 spin_lock(&kvm->mmu_lock);
1013 if (kvm->arch.pgd) {
1014 unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
1015 pgd = READ_ONCE(kvm->arch.pgd);
1016 kvm->arch.pgd = NULL;
1017 kvm->arch.pgd_phys = 0;
1019 spin_unlock(&kvm->mmu_lock);
1021 /* Free the HW pgd, one page at a time */
1023 free_pages_exact(pgd, stage2_pgd_size(kvm));
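/*
 * Stage-2 walk helper: return the PUD entry for @addr, populating the PGD
 * level from @cache if the entry is empty and a cache was provided.
 */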
1026 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1032 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
1033 if (stage2_pgd_none(kvm, *pgd)) {
1036 pud = mmu_memory_cache_alloc(cache);
1037 stage2_pgd_populate(kvm, pgd, pud);
1038 get_page(virt_to_page(pgd));
1041 return stage2_pud_offset(kvm, pgd, addr);
1044 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1050 pud = stage2_get_pud(kvm, cache, addr);
1051 if (!pud || stage2_pud_huge(kvm, *pud))
1054 if (stage2_pud_none(kvm, *pud)) {
1057 pmd = mmu_memory_cache_alloc(cache);
1058 stage2_pud_populate(kvm, pud, pmd);
1059 get_page(virt_to_page(pud));
1062 return stage2_pmd_offset(kvm, pud, addr);
1065 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
1066 *cache, phys_addr_t addr, const pmd_t *new_pmd)
1068 pmd_t *pmd, old_pmd;
1070 pmd = stage2_get_pmd(kvm, cache, addr);
1074 if (pmd_present(old_pmd)) {
1076 * Multiple vcpus faulting on the same PMD entry can
1077 * lead to them sequentially updating the PMD with the
1078 * same value. Following the break-before-make
1079 * (pmd_clear() followed by tlb_flush()) process can
1080 * hinder forward progress due to refaults generated
1081 * on missing translations.
1083 * Skip updating the page table if the entry is unchanged.
1086 if (pmd_val(old_pmd) == pmd_val(*new_pmd))
1090 * Mapping in huge pages should only happen through a
1091 * fault. If a page is merged into a transparent huge
1092 * page, the individual subpages of that huge page
1093 * should be unmapped through MMU notifiers before we get here.
1096 * Merging of CompoundPages is not supported; they
1097 * should first be split, unmapped, merged,
1098 * and mapped back in on-demand.
1100 VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
1103 kvm_tlb_flush_vmid_ipa(kvm, addr);
1105 get_page(virt_to_page(pmd));
1108 kvm_set_pmd(pmd, *new_pmd);
1112 static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1113 phys_addr_t addr, const pud_t *new_pudp)
1115 pud_t *pudp, old_pud;
1117 pudp = stage2_get_pud(kvm, cache, addr);
1123 * A large number of vcpus faulting on the same stage 2 entry
1124 * can lead to a refault due to the
1125 * stage2_pud_clear()/tlb_flush(). Skip updating the page
1126 * tables if there is no change.
1128 if (pud_val(old_pud) == pud_val(*new_pudp))
1131 if (stage2_pud_present(kvm, old_pud)) {
1132 stage2_pud_clear(kvm, pudp);
1133 kvm_tlb_flush_vmid_ipa(kvm, addr);
1135 get_page(virt_to_page(pudp));
1138 kvm_set_pud(pudp, *new_pudp);
1143 * stage2_get_leaf_entry - walk the stage2 VM page tables and return
1144 * true if a valid and present leaf-entry is found. A pointer to the
1145 * leaf-entry is returned in the appropriate level variable - pudpp, pmdpp or ptepp.
1148 static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
1149 pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp)
1159 pudp = stage2_get_pud(kvm, NULL, addr);
1160 if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
1163 if (stage2_pud_huge(kvm, *pudp)) {
1168 pmdp = stage2_pmd_offset(kvm, pudp, addr);
1169 if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
1172 if (pmd_thp_or_huge(*pmdp)) {
1177 ptep = pte_offset_kernel(pmdp, addr);
1178 if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
1185 static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
1192 found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep);
1197 return kvm_s2pud_exec(pudp);
1199 return kvm_s2pmd_exec(pmdp);
1201 return kvm_s2pte_exec(ptep);
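/*
 * Install a stage-2 PTE for @addr, allocating intermediate tables from
 * @cache as needed. When dirty logging is active, huge mappings covering
 * @addr are dissolved first; installing a device (IOMAP) mapping over an
 * already present entry is treated as an error.
 */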
1204 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1205 phys_addr_t addr, const pte_t *new_pte,
1206 unsigned long flags)
1210 pte_t *pte, old_pte;
1211 bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
1212 bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
1214 VM_BUG_ON(logging_active && !cache);
1216 /* Create stage-2 page table mapping - Levels 0 and 1 */
1217 pud = stage2_get_pud(kvm, cache, addr);
1220 * Ignore calls from kvm_set_spte_hva for unallocated address ranges.
1227 * While dirty page logging - dissolve huge PUD, then continue
1228 * on to allocate page.
1231 stage2_dissolve_pud(kvm, addr, pud);
1233 if (stage2_pud_none(kvm, *pud)) {
1235 return 0; /* ignore calls from kvm_set_spte_hva */
1236 pmd = mmu_memory_cache_alloc(cache);
1237 stage2_pud_populate(kvm, pud, pmd);
1238 get_page(virt_to_page(pud));
1241 pmd = stage2_pmd_offset(kvm, pud, addr);
1244 * Ignore calls from kvm_set_spte_hva for unallocated address ranges.
1251 * While dirty page logging - dissolve huge PMD, then continue on to
1255 stage2_dissolve_pmd(kvm, addr, pmd);
1257 /* Create stage-2 page mappings - Level 2 */
1258 if (pmd_none(*pmd)) {
1260 return 0; /* ignore calls from kvm_set_spte_hva */
1261 pte = mmu_memory_cache_alloc(cache);
1262 kvm_pmd_populate(pmd, pte);
1263 get_page(virt_to_page(pmd));
1266 pte = pte_offset_kernel(pmd, addr);
1268 if (iomap && pte_present(*pte))
1271 /* Create 2nd stage page table mapping - Level 3 */
1273 if (pte_present(old_pte)) {
1274 /* Skip page table update if there is no change */
1275 if (pte_val(old_pte) == pte_val(*new_pte))
1278 kvm_set_pte(pte, __pte(0));
1279 kvm_tlb_flush_vmid_ipa(kvm, addr);
1281 get_page(virt_to_page(pte));
1284 kvm_set_pte(pte, *new_pte);
1288 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1289 static int stage2_ptep_test_and_clear_young(pte_t *pte)
1291 if (pte_young(*pte)) {
1292 *pte = pte_mkold(*pte);
1298 static int stage2_ptep_test_and_clear_young(pte_t *pte)
1300 return __ptep_test_and_clear_young(pte);
1304 static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
1306 return stage2_ptep_test_and_clear_young((pte_t *)pmd);
1309 static int stage2_pudp_test_and_clear_young(pud_t *pud)
1311 return stage2_ptep_test_and_clear_young((pte_t *)pud);
1315 * kvm_phys_addr_ioremap - map a device range to guest IPA
1317 * @kvm: The KVM pointer
1318 * @guest_ipa: The IPA at which to insert the mapping
1319 * @pa: The physical address of the device
1320 * @size: The size of the mapping
1322 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
1323 phys_addr_t pa, unsigned long size, bool writable)
1325 phys_addr_t addr, end;
1328 struct kvm_mmu_memory_cache cache = { 0, };
1330 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
1331 pfn = __phys_to_pfn(pa);
1333 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
1334 pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);
1337 pte = kvm_s2pte_mkwrite(pte);
1339 ret = mmu_topup_memory_cache(&cache,
1340 kvm_mmu_cache_min_pages(kvm),
1344 spin_lock(&kvm->mmu_lock);
1345 ret = stage2_set_pte(kvm, &cache, addr, &pte,
1346 KVM_S2PTE_FLAG_IS_IOMAP);
1347 spin_unlock(&kvm->mmu_lock);
1355 mmu_free_memory_cache(&cache);
1359 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
1361 kvm_pfn_t pfn = *pfnp;
1362 gfn_t gfn = *ipap >> PAGE_SHIFT;
1363 struct page *page = pfn_to_page(pfn);
1366 * PageTransCompoundMap() returns true for THP and
1367 * hugetlbfs. Make sure the adjustment is done only for THP
1370 if (!PageHuge(page) && PageTransCompoundMap(page)) {
1373 * The address we faulted on is backed by a transparent huge
1374 * page. However, because we map the compound huge page and
1375 * not the individual tail page, we need to transfer the
1376 * refcount to the head page. We have to be careful that the
1377 * THP doesn't start to split while we are adjusting the
1380 * We are sure this doesn't happen, because mmu_notifier_retry
1381 * was successful and we are holding the mmu_lock, so if this
1382 * THP is trying to split, it will be blocked in the mmu
1383 * notifier before touching any of the pages, specifically
1384 * before being able to call __split_huge_page_refcount().
1386 * We can therefore safely transfer the refcount from PG_tail
1387 * to PG_head and switch the pfn from a tail page to the head
1390 mask = PTRS_PER_PMD - 1;
1391 VM_BUG_ON((gfn & mask) != (pfn & mask));
1394 kvm_release_pfn_clean(pfn);
1407 * stage2_wp_ptes - write protect PMD range
1408 * @pmd: pointer to pmd entry
1409 * @addr: range start address
1410 * @end: range end address
1412 static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
1416 pte = pte_offset_kernel(pmd, addr);
1418 if (!pte_none(*pte)) {
1419 if (!kvm_s2pte_readonly(pte))
1420 kvm_set_s2pte_readonly(pte);
1422 } while (pte++, addr += PAGE_SIZE, addr != end);
1426 * stage2_wp_pmds - write protect PUD range
1427 * @kvm: kvm instance for the VM
1428 * @pud: pointer to pud entry
1429 * @addr: range start address
1430 * @end: range end address
1432 static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
1433 phys_addr_t addr, phys_addr_t end)
1438 pmd = stage2_pmd_offset(kvm, pud, addr);
1441 next = stage2_pmd_addr_end(kvm, addr, end);
1442 if (!pmd_none(*pmd)) {
1443 if (pmd_thp_or_huge(*pmd)) {
1444 if (!kvm_s2pmd_readonly(pmd))
1445 kvm_set_s2pmd_readonly(pmd);
1447 stage2_wp_ptes(pmd, addr, next);
1450 } while (pmd++, addr = next, addr != end);
1454 * stage2_wp_puds - write protect PGD range
1455 * @pgd: pointer to pgd entry
1456 * @addr: range start address
1457 * @end: range end address
1459 * Process PUD entries; a huge PUD is simply write protected in place.
1461 static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
1462 phys_addr_t addr, phys_addr_t end)
1467 pud = stage2_pud_offset(kvm, pgd, addr);
1469 next = stage2_pud_addr_end(kvm, addr, end);
1470 if (!stage2_pud_none(kvm, *pud)) {
1471 if (stage2_pud_huge(kvm, *pud)) {
1472 if (!kvm_s2pud_readonly(pud))
1473 kvm_set_s2pud_readonly(pud);
1475 stage2_wp_pmds(kvm, pud, addr, next);
1478 } while (pud++, addr = next, addr != end);
1482 * stage2_wp_range() - write protect stage2 memory region range
1483 * @kvm: The KVM pointer
1484 * @addr: Start address of range
1485 * @end: End address of range
1487 static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
1492 pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
1495 * Release kvm_mmu_lock periodically if the memory region is
1496 * large. Otherwise, we may see kernel panics with
1497 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
1498 * CONFIG_LOCKDEP. Additionally, holding the lock too long
1499 * will also starve other vCPUs. We have to also make sure
1500 * that the page tables are not freed while we released the lock.
1503 cond_resched_lock(&kvm->mmu_lock);
1504 if (!READ_ONCE(kvm->arch.pgd))
1506 next = stage2_pgd_addr_end(kvm, addr, end);
1507 if (stage2_pgd_present(kvm, *pgd))
1508 stage2_wp_puds(kvm, pgd, addr, next);
1509 } while (pgd++, addr = next, addr != end);
1513 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1514 * @kvm: The KVM pointer
1515 * @slot: The memory slot to write protect
1517 * Called to start logging dirty pages after memory region
1518 * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
1519 * all present PUD, PMD and PTEs are write protected in the memory region.
1520 * Afterwards read of dirty page log can be called.
1522 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1523 * serializing operations for VM memory regions.
1525 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1527 struct kvm_memslots *slots = kvm_memslots(kvm);
1528 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1529 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
1530 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1532 spin_lock(&kvm->mmu_lock);
1533 stage2_wp_range(kvm, start, end);
1534 spin_unlock(&kvm->mmu_lock);
1535 kvm_flush_remote_tlbs(kvm);
1539 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
1540 * @kvm: The KVM pointer
1541 * @slot: The memory slot associated with mask
1542 * @gfn_offset: The gfn offset in memory slot
1543 * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
1544 * slot to be write protected
1546 * Walks the bits set in @mask and write protects the associated PTEs. Caller must
1547 * acquire kvm_mmu_lock.
1549 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1550 struct kvm_memory_slot *slot,
1551 gfn_t gfn_offset, unsigned long mask)
1553 phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1554 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1555 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1557 stage2_wp_range(kvm, start, end);
1561 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected pages.
1564 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1565 * enable dirty logging for them.
1567 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1568 struct kvm_memory_slot *slot,
1569 gfn_t gfn_offset, unsigned long mask)
1571 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1574 static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
1576 __clean_dcache_guest_page(pfn, size);
1579 static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
1581 __invalidate_icache_guest_page(pfn, size);
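/*
 * Report a hardware-poisoned page to the faulting task via SIGBUS
 * (BUS_MCEERR_AR), sizing the reported granule from the VMA when it is
 * backed by hugetlbfs.
 */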
1584 static void kvm_send_hwpoison_signal(unsigned long address,
1585 struct vm_area_struct *vma)
1589 if (is_vm_hugetlb_page(vma))
1590 lsb = huge_page_shift(hstate_vma(vma));
1594 send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
1597 static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
1601 hva_t uaddr_start, uaddr_end;
1604 size = memslot->npages * PAGE_SIZE;
1606 gpa_start = memslot->base_gfn << PAGE_SHIFT;
1608 uaddr_start = memslot->userspace_addr;
1609 uaddr_end = uaddr_start + size;
1612 * Pages belonging to memslots that don't have the same alignment
1613 * within a PMD for userspace and IPA cannot be mapped with stage-2
1614 * PMD entries, because we'll end up mapping the wrong pages.
1616 * Consider a layout like the following:
1618 * memslot->userspace_addr:
1619 * +-----+--------------------+--------------------+---+
1620 * |abcde|fgh Stage-1 PMD | Stage-1 PMD tv|xyz|
1621 * +-----+--------------------+--------------------+---+
1623 * memslot->base_gfn << PAGE_SHIFT:
1624 * +---+--------------------+--------------------+-----+
1625 * |abc|def Stage-2 PMD | Stage-2 PMD |tvxyz|
1626 * +---+--------------------+--------------------+-----+
1628 * If we create those stage-2 PMDs, we'll end up with this incorrect mapping.
1634 if ((gpa_start & ~S2_PMD_MASK) != (uaddr_start & ~S2_PMD_MASK))
1638 * Next, let's make sure we're not trying to map anything not covered
1639 * by the memslot. This means we have to prohibit PMD size mappings
1640 * for the beginning and end of a non-PMD aligned and non-PMD sized
1641 * memory slot (illustrated by the head and tail parts of the
1642 * userspace view above containing pages 'abcde' and 'xyz',
1645 * Note that it doesn't matter if we do the check using the
1646 * userspace_addr or the base_gfn, as both are equally aligned (per
1647 * the check above) and equally sized.
1649 return (hva & S2_PMD_MASK) >= uaddr_start &&
1650 (hva & S2_PMD_MASK) + S2_PMD_SIZE <= uaddr_end;
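/*
 * Handle a stage-2 fault on memslot-backed memory: pin the page with
 * gfn_to_pfn_prot(), choose a mapping size from the backing VMA (PUD, PMD
 * or PTE) and install the new entry under mmu_lock, re-checking
 * mmu_notifier_seq to avoid racing with MMU notifier invalidations.
 */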
1653 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1654 struct kvm_memory_slot *memslot, unsigned long hva,
1655 unsigned long fault_status)
1658 bool write_fault, writable, force_pte = false;
1659 bool exec_fault, needs_exec;
1660 unsigned long mmu_seq;
1661 gfn_t gfn = fault_ipa >> PAGE_SHIFT;
1662 struct kvm *kvm = vcpu->kvm;
1663 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
1664 struct vm_area_struct *vma;
1666 pgprot_t mem_type = PAGE_S2;
1667 bool logging_active = memslot_is_logging(memslot);
1668 unsigned long vma_pagesize, flags = 0;
1670 write_fault = kvm_is_write_fault(vcpu);
1671 exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
1672 VM_BUG_ON(write_fault && exec_fault);
1674 if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
1675 kvm_err("Unexpected L2 read permission error\n");
1679 if (!fault_supports_stage2_pmd_mappings(memslot, hva))
1685 /* Let's check if we will get back a huge page backed by hugetlbfs */
1686 down_read(&current->mm->mmap_sem);
1687 vma = find_vma_intersection(current->mm, hva, hva + 1);
1688 if (unlikely(!vma)) {
1689 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1690 up_read(&current->mm->mmap_sem);
1694 vma_pagesize = vma_kernel_pagesize(vma);
1696 * The stage2 has a minimum of 2 level table (For arm64 see
1697 * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
1698 * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
1699 * As for PUD huge maps, we must make sure that we have at least
1700 * 3 levels, i.e, PMD is not folded.
1702 if ((vma_pagesize == PMD_SIZE ||
1703 (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) &&
1705 gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
1707 up_read(&current->mm->mmap_sem);
1709 /* We need minimum second+third level pages */
1710 ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
1715 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1717 * Ensure the read of mmu_notifier_seq happens before we call
1718 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
1719 * the page we just got a reference to gets unmapped before we have a
1720 * chance to grab the mmu_lock, which ensure that if the page gets
1721 * unmapped afterwards, the call to kvm_unmap_hva will take it away
1722 * from us again properly. This smp_rmb() interacts with the smp_wmb()
1723 * in kvm_mmu_notifier_invalidate_<page|range_end>.
1727 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
1728 if (pfn == KVM_PFN_ERR_HWPOISON) {
1729 kvm_send_hwpoison_signal(hva, vma);
1732 if (is_error_noslot_pfn(pfn))
1735 if (kvm_is_device_pfn(pfn)) {
1736 mem_type = PAGE_S2_DEVICE;
1737 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
1738 } else if (logging_active) {
1740 * Faults on pages in a memslot with logging enabled
1741 * should not be mapped with huge pages (it introduces churn
1742 * and performance degradation), so force a pte mapping.
1744 flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
1747 * Only actually map the page as writable if this was a write fault.
1754 spin_lock(&kvm->mmu_lock);
1755 if (mmu_notifier_retry(kvm, mmu_seq))
1758 if (vma_pagesize == PAGE_SIZE && !force_pte) {
1760 * Only PMD_SIZE transparent hugepages(THP) are
1761 * currently supported. This code will need to be
1762 * updated to support other THP sizes.
1764 if (transparent_hugepage_adjust(&pfn, &fault_ipa))
1765 vma_pagesize = PMD_SIZE;
1769 kvm_set_pfn_dirty(pfn);
1771 if (fault_status != FSC_PERM)
1772 clean_dcache_guest_page(pfn, vma_pagesize);
1775 invalidate_icache_guest_page(pfn, vma_pagesize);
1778 * If we took an execution fault we have made the
1779 * icache/dcache coherent above and should now let the s2
1780 * mapping be executable.
1782 * Write faults (!exec_fault && FSC_PERM) are orthogonal to
1783 * execute permissions, and we preserve whatever we have.
1785 needs_exec = exec_fault ||
1786 (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
1788 if (vma_pagesize == PUD_SIZE) {
1789 pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
1791 new_pud = kvm_pud_mkhuge(new_pud);
1793 new_pud = kvm_s2pud_mkwrite(new_pud);
1796 new_pud = kvm_s2pud_mkexec(new_pud);
1798 ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud);
1799 } else if (vma_pagesize == PMD_SIZE) {
1800 pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
1802 new_pmd = kvm_pmd_mkhuge(new_pmd);
1805 new_pmd = kvm_s2pmd_mkwrite(new_pmd);
1808 new_pmd = kvm_s2pmd_mkexec(new_pmd);
1810 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1812 pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
1815 new_pte = kvm_s2pte_mkwrite(new_pte);
1816 mark_page_dirty(kvm, gfn);
1820 new_pte = kvm_s2pte_mkexec(new_pte);
1822 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
1826 spin_unlock(&kvm->mmu_lock);
1827 kvm_set_pfn_accessed(pfn);
1828 kvm_release_pfn_clean(pfn);
1833 * Resolve the access fault by making the page young again.
1834 * Note that because the faulting entry is guaranteed not to be
1835 * cached in the TLB, we don't need to invalidate anything.
1836 * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
1837 * so there is no need for atomic (pte|pmd)_mkyoung operations.
1839 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1845 bool pfn_valid = false;
1847 trace_kvm_access_fault(fault_ipa);
1849 spin_lock(&vcpu->kvm->mmu_lock);
1851 if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
1854 if (pud) { /* HugeTLB */
1855 *pud = kvm_s2pud_mkyoung(*pud);
1856 pfn = kvm_pud_pfn(*pud);
1858 } else if (pmd) { /* THP, HugeTLB */
1859 *pmd = pmd_mkyoung(*pmd);
1860 pfn = pmd_pfn(*pmd);
1863 *pte = pte_mkyoung(*pte); /* Just a page... */
1864 pfn = pte_pfn(*pte);
1869 spin_unlock(&vcpu->kvm->mmu_lock);
1871 kvm_set_pfn_accessed(pfn);
1875 * kvm_handle_guest_abort - handles all 2nd stage aborts
1876 * @vcpu: the VCPU pointer
1877 * @run: the kvm_run structure
1879 * Any abort that gets to the host is almost guaranteed to be caused by a
1880 * missing second stage translation table entry, which can mean that either the
1881 * guest simply needs more memory and we must allocate an appropriate page or it
1882 * can mean that the guest tried to access I/O memory, which is emulated by user
1883 * space. The distinction is based on the IPA causing the fault and whether this
1884 * memory region has been registered as standard RAM by user space.
1886 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1888 unsigned long fault_status;
1889 phys_addr_t fault_ipa;
1890 struct kvm_memory_slot *memslot;
1892 bool is_iabt, write_fault, writable;
1896 fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1898 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1899 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
1901 /* Synchronous External Abort? */
1902 if (kvm_vcpu_dabt_isextabt(vcpu)) {
1904 * For RAS the host kernel may handle this abort.
1905 * There is no need to pass the error into the guest.
1907 if (!kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
1910 if (unlikely(!is_iabt)) {
1911 kvm_inject_vabt(vcpu);
1916 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
1917 kvm_vcpu_get_hfar(vcpu), fault_ipa);
1919 /* Check the stage-2 fault is trans. fault or write fault */
1920 if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1921 fault_status != FSC_ACCESS) {
1922 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1923 kvm_vcpu_trap_get_class(vcpu),
1924 (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1925 (unsigned long)kvm_vcpu_get_hsr(vcpu));
1929 idx = srcu_read_lock(&vcpu->kvm->srcu);
1931 gfn = fault_ipa >> PAGE_SHIFT;
1932 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1933 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1934 write_fault = kvm_is_write_fault(vcpu);
1935 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1937 /* Prefetch Abort on I/O address */
1938 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1944 * Check for a cache maintenance operation. Since we
1945 * ended-up here, we know it is outside of any memory
1946 * slot. But we can't find out if that is for a device,
1947 * or if the guest is just being stupid. The only thing
1948 * we know for sure is that this range cannot be cached.
1950 * So let's assume that the guest is just being
1951 * cautious, and skip the instruction.
1953 if (kvm_vcpu_dabt_is_cm(vcpu)) {
1954 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1960 * The IPA is reported as [MAX:12], so we need to
1961 * complement it with the bottom 12 bits from the
1962 * faulting VA. This is always 12 bits, irrespective of the page size.
1965 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
1966 ret = io_mem_abort(vcpu, run, fault_ipa);
1970 /* Userspace should not be able to register out-of-bounds IPAs */
1971 VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
1973 if (fault_status == FSC_ACCESS) {
1974 handle_access_fault(vcpu, fault_ipa);
1979 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
1983 srcu_read_unlock(&vcpu->kvm->srcu, idx);
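/*
 * Apply @handler to every guest physical range that intersects the host
 * virtual address range [start, end), iterating over all memslots.
 */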
1987 static int handle_hva_to_gpa(struct kvm *kvm,
1988 unsigned long start,
1990 int (*handler)(struct kvm *kvm,
1991 gpa_t gpa, u64 size,
1995 struct kvm_memslots *slots;
1996 struct kvm_memory_slot *memslot;
1999 slots = kvm_memslots(kvm);
2001 /* we only care about the pages that the guest sees */
2002 kvm_for_each_memslot(memslot, slots) {
2003 unsigned long hva_start, hva_end;
2006 hva_start = max(start, memslot->userspace_addr);
2007 hva_end = min(end, memslot->userspace_addr +
2008 (memslot->npages << PAGE_SHIFT));
2009 if (hva_start >= hva_end)
2012 gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
2013 ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
2019 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2021 unmap_stage2_range(kvm, gpa, size);
2025 int kvm_unmap_hva_range(struct kvm *kvm,
2026 unsigned long start, unsigned long end)
2031 trace_kvm_unmap_hva_range(start, end);
2032 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
2036 static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2038 pte_t *pte = (pte_t *)data;
2040 WARN_ON(size != PAGE_SIZE);
2042 * We can always call stage2_set_pte with KVM_S2_FLAG_LOGGING_ACTIVE
2043 * flag clear because MMU notifiers will have unmapped a huge PMD before
2044 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
2045 * therefore stage2_set_pte() never needs to clear out a huge PMD
2046 * through this calling path.
2048 stage2_set_pte(kvm, NULL, gpa, pte, 0);
2053 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
2055 unsigned long end = hva + PAGE_SIZE;
2056 kvm_pfn_t pfn = pte_pfn(pte);
2062 trace_kvm_set_spte_hva(hva);
2065 * We've moved a page around, probably through CoW, so let's treat it
2066 * just like a translation fault and clean the cache to the PoC.
2068 clean_dcache_guest_page(pfn, PAGE_SIZE);
2069 stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
2070 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
2075 static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2081 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2082 if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
2086 return stage2_pudp_test_and_clear_young(pud);
2088 return stage2_pmdp_test_and_clear_young(pmd);
2090 return stage2_ptep_test_and_clear_young(pte);
2093 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
2099 WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
2100 if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
2104 return kvm_s2pud_young(*pud);
2106 return pmd_young(*pmd);
2108 return pte_young(*pte);
2111 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
2115 trace_kvm_age_hva(start, end);
2116 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
2119 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
2123 trace_kvm_test_age_hva(hva);
2124 return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
2127 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
2129 mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
2132 phys_addr_t kvm_mmu_get_httbr(void)
2134 if (__kvm_cpu_uses_extended_idmap())
2135 return virt_to_phys(merged_hyp_pgd);
2137 return virt_to_phys(hyp_pgd);
2140 phys_addr_t kvm_get_idmap_vector(void)
2142 return hyp_idmap_vector;
2145 static int kvm_map_idmap_text(pgd_t *pgd)
2149 /* Create the idmap in the boot page tables */
2150 err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
2151 hyp_idmap_start, hyp_idmap_end,
2152 __phys_to_pfn(hyp_idmap_start),
2155 kvm_err("Failed to idmap %lx-%lx\n",
2156 hyp_idmap_start, hyp_idmap_end);
2161 int kvm_mmu_init(void)
2165 hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
2166 hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
2167 hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
2168 hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
2169 hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
2172 * We rely on the linker script to ensure at build time that the HYP
2173 * init code does not cross a page boundary.
2175 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
2177 kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
2178 kvm_debug("HYP VA range: %lx:%lx\n",
2179 kern_hyp_va(PAGE_OFFSET),
2180 kern_hyp_va((unsigned long)high_memory - 1));
2182 if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
2183 hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) &&
2184 hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
2186 * The idmap page is intersecting with the VA space;
2187 * it is not safe to continue further.
2189 kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
2194 hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
2196 kvm_err("Hyp mode PGD not allocated\n");
2201 if (__kvm_cpu_uses_extended_idmap()) {
2202 boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
2204 if (!boot_hyp_pgd) {
2205 kvm_err("Hyp boot PGD not allocated\n");
2210 err = kvm_map_idmap_text(boot_hyp_pgd);
2214 merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
2215 if (!merged_hyp_pgd) {
2216 kvm_err("Failed to allocate extra HYP pgd\n");
2219 __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
2222 err = kvm_map_idmap_text(hyp_pgd);
2227 io_map_base = hyp_idmap_start;
2234 void kvm_arch_commit_memory_region(struct kvm *kvm,
2235 const struct kvm_userspace_memory_region *mem,
2236 const struct kvm_memory_slot *old,
2237 const struct kvm_memory_slot *new,
2238 enum kvm_mr_change change)
2241 * At this point memslot has been committed and there is an
2242 * allocated dirty_bitmap[], dirty pages will be tracked while the
2243 * memory slot is write protected.
2245 if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
2246 kvm_mmu_wp_memory_region(kvm, mem->slot);
2249 int kvm_arch_prepare_memory_region(struct kvm *kvm,
2250 struct kvm_memory_slot *memslot,
2251 const struct kvm_userspace_memory_region *mem,
2252 enum kvm_mr_change change)
2254 hva_t hva = mem->userspace_addr;
2255 hva_t reg_end = hva + mem->memory_size;
2256 bool writable = !(mem->flags & KVM_MEM_READONLY);
2259 if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
2260 change != KVM_MR_FLAGS_ONLY)
2264 * Prevent userspace from creating a memory region outside of the IPA
2265 * space addressable by the KVM guest.
2267 if (memslot->base_gfn + memslot->npages >=
2268 (kvm_phys_size(kvm) >> PAGE_SHIFT))
2271 down_read(&current->mm->mmap_sem);
2273 * A memory region could potentially cover multiple VMAs, and any holes
2274 * between them, so iterate over all of them to find out if we can map
2275 * any of them right now.
2277 * +--------------------------------------------+
2278 * +---------------+----------------+ +----------------+
2279 * | : VMA 1 | VMA 2 | | VMA 3 : |
2280 * +---------------+----------------+ +----------------+
2282 * +--------------------------------------------+
2285 struct vm_area_struct *vma = find_vma(current->mm, hva);
2286 hva_t vm_start, vm_end;
2288 if (!vma || vma->vm_start >= reg_end)
2292 * Mapping a read-only VMA is only allowed if the
2293 * memory region is configured as read-only.
2295 if (writable && !(vma->vm_flags & VM_WRITE)) {
2301 * Take the intersection of this VMA with the memory region
2303 vm_start = max(hva, vma->vm_start);
2304 vm_end = min(reg_end, vma->vm_end);
2306 if (vma->vm_flags & VM_PFNMAP) {
2307 gpa_t gpa = mem->guest_phys_addr +
2308 (vm_start - mem->userspace_addr);
2311 pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
2312 pa += vm_start - vma->vm_start;
2314 /* IO region dirty page logging not allowed */
2315 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
2320 ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
2327 } while (hva < reg_end);
2329 if (change == KVM_MR_FLAGS_ONLY)
2332 spin_lock(&kvm->mmu_lock);
2334 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
2336 stage2_flush_memslot(kvm, memslot);
2337 spin_unlock(&kvm->mmu_lock);
2339 up_read(&current->mm->mmap_sem);
2343 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
2344 struct kvm_memory_slot *dont)
2348 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2349 unsigned long npages)
2354 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
2358 void kvm_arch_flush_shadow_all(struct kvm *kvm)
2360 kvm_free_stage2_pgd(kvm);
2363 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
2364 struct kvm_memory_slot *slot)
2366 gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
2367 phys_addr_t size = slot->npages << PAGE_SHIFT;
2369 spin_lock(&kvm->mmu_lock);
2370 unmap_stage2_range(kvm, gpa, size);
2371 spin_unlock(&kvm->mmu_lock);
2375 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
2378 * - S/W ops are local to a CPU (not broadcast)
2379 * - We have line migration behind our back (speculation)
2380 * - System caches don't support S/W at all (damn!)
2382 * In the face of the above, the best we can do is to try and convert
2383 * S/W ops to VA ops. Because the guest is not allowed to infer the
2384 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
2385 * which is a rather good thing for us.
2387 * Also, it is only used when turning caches on/off ("The expected
2388 * usage of the cache maintenance instructions that operate by set/way
2389 * is associated with the cache maintenance instructions associated
2390 * with the powerdown and powerup of caches, if this is required by
2391 * the implementation.").
2393 * We use the following policy:
2395 * - If we trap a S/W operation, we enable VM trapping to detect
2396 * caches being turned on/off, and do a full clean.
2398 * - We flush the caches on both caches being turned on and off.
2400 * - Once the caches are enabled, we stop trapping VM ops.
2402 void kvm_set_way_flush(struct kvm_vcpu *vcpu)
2404 unsigned long hcr = *vcpu_hcr(vcpu);
2407 * If this is the first time we do a S/W operation
2408 * (i.e. HCR_TVM not set) flush the whole memory, and set the VM trapping.
2411 * Otherwise, rely on the VM trapping to wait for the MMU +
2412 * Caches to be turned off. At that point, we'll be able to
2413 * clean the caches again.
2415 if (!(hcr & HCR_TVM)) {
2416 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
2417 vcpu_has_cache_enabled(vcpu));
2418 stage2_flush_vm(vcpu->kvm);
2419 *vcpu_hcr(vcpu) = hcr | HCR_TVM;
2423 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
2425 bool now_enabled = vcpu_has_cache_enabled(vcpu);
2428 * If switching the MMU+caches on, need to invalidate the caches.
2429 * If switching it off, need to clean the caches.
2430 * Clean + invalidate does the trick always.
2432 if (now_enabled != was_enabled)
2433 stage2_flush_vm(vcpu->kvm);
2435 /* Caches are now on, stop trapping VM ops (until a S/W op) */
2437 *vcpu_hcr(vcpu) &= ~HCR_TVM;
2439 trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);