/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/log2.h>

#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/pte-walk.h>
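/*
 * Real-mode HPT (hashed page table) manipulation for HV KVM guests.
 * Most of these routines can be entered in real mode, which is why
 * vmalloc'd structures such as the reverse-map array are accessed
 * through real_vmalloc_addr() below.
 */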
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
        unsigned long addr = (unsigned long) x;
        pte_t *p;
        /*
         * assume we don't have huge pages in vmalloc space...
         * So don't worry about THP collapse/split.  This is called
         * only in real mode with MSR_EE = 0, hence we won't need
         * irq_save/restore.
         */
        p = find_init_mm_pte(addr, NULL);
        if (!p || !pte_present(*p))
                return NULL;
        addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
        return __va(addr);
}
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
        int global;
        int cpu;

        /*
         * If there is only one vcore, and it's currently running,
         * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
         * we can use tlbiel as long as we mark all other physical
         * cores as potentially having stale TLB entries for this lpid.
         * Otherwise, don't use tlbiel.
         */
        if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
                global = 0;
        else
                global = 1;

        if (!global) {
                /* any other core might now have stale TLB entries... */
                cpumask_setall(&kvm->arch.need_tlb_flush);
                cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
                /*
                 * On POWER9, threads are independent but the TLB is shared,
                 * so use the bit for the first thread to represent the core.
                 */
                if (cpu_has_feature(CPU_FTR_ARCH_300))
                        cpu = cpu_first_thread_sibling(cpu);
                cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
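                /*
                 * Only this core's bit is cleared here; every other core
                 * keeps its need_tlb_flush bit set and is expected to flush
                 * TLB entries for this LPID before it next enters the
                 * guest, which is what makes the local tlbiel safe.
                 */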
        }

        return global;
}

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
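/*
 * The reverse map for a real page is a circular doubly-linked list of
 * HPTE index values threaded through the forw/back fields of the
 * revmap_entry array; the page's rmap word holds the index of the head
 * entry together with the KVMPPC_RMAP_PRESENT flag.
 */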
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                             unsigned long *rmap, long pte_index, int realmode)
{
        struct revmap_entry *head, *tail;
        unsigned long i;

        if (*rmap & KVMPPC_RMAP_PRESENT) {
                i = *rmap & KVMPPC_RMAP_INDEX;
                head = &kvm->arch.hpt.rev[i];
                if (realmode)
                        head = real_vmalloc_addr(head);
                tail = &kvm->arch.hpt.rev[head->back];
                if (realmode)
                        tail = real_vmalloc_addr(tail);
                rev->forw = i;
                rev->back = head->back;
                tail->forw = pte_index;
                head->back = pte_index;
        } else {
                rev->forw = rev->back = pte_index;
                *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
                        pte_index | KVMPPC_RMAP_PRESENT;
        }
        unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
/* Update the changed page order field of an rmap entry */
void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize)
{
        unsigned long order;

        if (!psize)
                return;
        order = ilog2(psize);
        order <<= KVMPPC_RMAP_CHG_SHIFT;
        if (order > (*rmap & KVMPPC_RMAP_CHG_ORDER))
                *rmap = (*rmap & ~KVMPPC_RMAP_CHG_ORDER) | order;
}
EXPORT_SYMBOL_GPL(kvmppc_update_rmap_change);
/* Returns a pointer to the revmap entry for the page mapped by a HPTE */
static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
                                      unsigned long hpte_gr)
{
        struct kvm_memory_slot *memslot;
        unsigned long *rmap;
        unsigned long gfn;

        gfn = hpte_rpn(hpte_gr, hpte_page_size(hpte_v, hpte_gr));
        memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
        if (!memslot)
                return NULL;
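        /*
         * The rmap array lives in the memslot and is indexed by the gfn's
         * offset within the slot; convert the pointer with
         * real_vmalloc_addr() since we may be running in real mode.
         */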
        rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
        return rmap;
}

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
                                struct revmap_entry *rev,
                                unsigned long hpte_v, unsigned long hpte_r)
{
        struct revmap_entry *next, *prev;
        unsigned long ptel, head;
        unsigned long *rmap;
        unsigned long rcbits;

        rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
        ptel = rev->guest_rpte |= rcbits;
        rmap = revmap_for_hpte(kvm, hpte_v, ptel);
        if (!rmap)
                return;
        lock_rmap(rmap);

        head = *rmap & KVMPPC_RMAP_INDEX;
        next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
        prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
        next->back = rev->back;
        prev->forw = rev->forw;
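        /*
         * If this HPTE was the head of the chain, advance the head to the
         * next entry, or clear KVMPPC_RMAP_PRESENT if the chain is now
         * empty.
         */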
        if (head == pte_index) {
                head = rev->forw;
                if (head == pte_index)
                        *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
                else
                        *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
        }
        *rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
        if (rcbits & HPTE_R_C)
                kvmppc_update_rmap_change(rmap, hpte_page_size(hpte_v, hpte_r));
        unlock_rmap(rmap);
}
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                       long pte_index, unsigned long pteh, unsigned long ptel,
                       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
        unsigned long i, pa, gpa, gfn, psize;
        unsigned long slot_fn, hva;
        __be64 *hpte;
        struct revmap_entry *rev;
        unsigned long g_ptel;
        struct kvm_memory_slot *memslot;
        unsigned hpage_shift;
        bool is_ci;
        unsigned long *rmap;
        pte_t *ptep;
        unsigned int writing;
        unsigned long mmu_seq;
        unsigned long rcbits, irq_flags = 0;

        if (kvm_is_radix(kvm))
                return H_FUNCTION;
        psize = hpte_page_size(pteh, ptel);
        if (!psize)
                return H_PARAMETER;
        writing = hpte_is_writable(ptel);
        pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
        ptel &= ~HPTE_GR_RESERVED;
        g_ptel = ptel;

        /* used later to detect if we might have been invalidated */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /* Find the memslot (if any) for this address */
        gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
        gfn = gpa >> PAGE_SHIFT;
        memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
        if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
                /* Emulated MMIO - mark this with key=31 */
                pteh |= HPTE_V_ABSENT;
                ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
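                /*
                 * An absent HPTE with both key bits set (key 31) is what
                 * is_mmio_hpte() later uses to recognise an emulated MMIO
                 * mapping when the guest faults on it.
                 */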
                goto do_insert;
        }

        /* Check if the requested page fits entirely in the memslot. */
        if (!slot_is_aligned(memslot, psize))
                return H_PARAMETER;
        slot_fn = gfn - memslot->base_gfn;
        rmap = &memslot->arch.rmap[slot_fn];

        /* Translate to host virtual address */
        hva = __gfn_to_hva_memslot(memslot, gfn);
        /*
         * If there was a page table change after the lookup, we will
         * retry via mmu_notifier_retry.
         */
        if (!realmode)
                local_irq_save(irq_flags);
        /*
         * If called in real mode we have MSR_EE = 0. Otherwise
         * we disabled interrupts above.
         */
        ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
        if (ptep) {
                pte_t pte;
                unsigned int host_pte_size;

                if (hpage_shift)
                        host_pte_size = 1ul << hpage_shift;
                else
                        host_pte_size = PAGE_SIZE;
                /*
                 * The guest page size should always be <= the host page
                 * size, if the host is using hugepages.
                 */
                if (host_pte_size < psize) {
                        if (!realmode)
                                local_irq_restore(irq_flags);
                        return H_PARAMETER;
                }
                pte = kvmppc_read_update_linux_pte(ptep, writing);
                if (pte_present(pte) && !pte_protnone(pte)) {
                        if (writing && !__pte_write(pte))
                                /* make the actual HPTE be read-only */
                                ptel = hpte_make_readonly(ptel);
                        is_ci = pte_ci(pte);
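                        /*
                         * Compose the real address from the host PFN plus
                         * the offset of the mapping within the (possibly
                         * huge) host page.
                         */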
                        pa = pte_pfn(pte) << PAGE_SHIFT;
                        pa |= hva & (host_pte_size - 1);
                        pa |= gpa & ~PAGE_MASK;
                }
        }
        if (!realmode)
                local_irq_restore(irq_flags);

        ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
        ptel |= pa;

        if (pa)
                pteh |= HPTE_V_VALID;
        else {
                pteh |= HPTE_V_ABSENT;
                ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
        }

        /* If we had a host pte mapping then check the WIMG bits */
        if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
                if (is_ci)
                        return H_PARAMETER;
                /*
                 * Allow guest to map emulated device memory as
                 * uncacheable, but actually make it cacheable.
                 */
                ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
                ptel |= HPTE_R_M;
        }

        /* Find and lock the HPTEG slot to use */
 do_insert:
        if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
                hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
                        if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                          HPTE_V_ABSENT))
                                break;
                        hpte += 2;
                }
                if (i == 8) {
                        /*
                         * Since try_lock_hpte doesn't retry (not even stdcx.
                         * failures), it could be that there is a free slot
                         * but we transiently failed to lock it. Try again,
                         * actually locking each slot and checking it.
                         */
                        hpte -= 16;
                        for (i = 0; i < 8; ++i) {
                                u64 pte;
                                while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                        cpu_relax();
                                pte = be64_to_cpu(hpte[0]);
                                if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
                                        break;
                                __unlock_hpte(hpte, pte);
                                hpte += 2;
                        }
                        if (i == 8)
                                return H_PTEG_FULL;
                }
                pte_index += i;
        } else {
                hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                   HPTE_V_ABSENT)) {
                        /* Lock the slot and check again */
                        u64 pte;

                        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                cpu_relax();
                        pte = be64_to_cpu(hpte[0]);
                        if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
                                __unlock_hpte(hpte, pte);
                                return H_PTEG_FULL;
                        }
                }
        }

        /* Save away the guest's idea of the second HPTE dword */
        rev = &kvm->arch.hpt.rev[pte_index];
        if (realmode)
                rev = real_vmalloc_addr(rev);
        if (rev) {
                rev->guest_rpte = g_ptel;
                note_hpte_modification(kvm, rev);
        }

        /* Link HPTE into reverse-map chain */
        if (pteh & HPTE_V_VALID) {
                if (realmode)
                        rmap = real_vmalloc_addr(rmap);
                lock_rmap(rmap);
                /* Check for pending invalidations under the rmap chain lock */
                if (mmu_notifier_retry(kvm, mmu_seq)) {
                        /* inval in progress, write a non-present HPTE */
                        pteh |= HPTE_V_ABSENT;
                        pteh &= ~HPTE_V_VALID;
                        ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
                        unlock_rmap(rmap);
                } else {
                        kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
                                                realmode);
                        /* Only set R/C in real HPTE if already set in *rmap */
                        rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
                        ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
                }
        }

        /* Convert to new format on P9 */
        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                ptel = hpte_old_to_new_r(pteh, ptel);
                pteh = hpte_old_to_new_v(pteh);
        }
        hpte[1] = cpu_to_be64(ptel);
        /* Write the first HPTE dword, unlocking the HPTE and making it valid */
        eieio();
        __unlock_hpte(hpte, pteh);
        asm volatile("ptesync" : : : "memory");
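        /*
         * The ptesync above ensures the HPTE stores have completed before
         * any subsequent hardware page-table search can find the new
         * entry.
         */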
        *pte_idx_ret = pte_index;
        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel)
{
        return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
                                 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->paca_index))
#endif
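/*
 * LOCK_TOKEN is the per-cpu value written into the tlbie lock word while
 * it is held.  It is taken from the paca so it is cheap to compute in
 * real mode, matching the convention used by the ppc64 spinlock code.
 */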
static inline int is_mmio_hpte(unsigned long v, unsigned long r)
{
        return ((v & HPTE_V_ABSENT) &&
                (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
                (HPTE_R_KEY_HI | HPTE_R_KEY_LO));
}

static inline int try_lock_tlbie(unsigned int *lock)
{
        unsigned int tmp, old;
        unsigned int token = LOCK_TOKEN;

        asm volatile("1:lwarx   %1,0,%2\n"
                     "  cmpwi   cr0,%1,0\n"
                     "  bne     2f\n"
                     "  stwcx.  %3,0,%2\n"
                     "  bne-    1b\n"
                     "  isync\n"
                     "2:"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (lock), "r" (token)
                     : "memory", "cc");

        return old == 0;
}

static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
                      long npages, int global, bool need_sync)
{
        long i;

        /*
         * We use the POWER9 5-operand versions of tlbie and tlbiel here.
         * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
         * the RS field, this is backwards-compatible with P7 and P8.
         */
        if (global) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                if (need_sync)
                        asm volatile("ptesync" : : : "memory");
                for (i = 0; i < npages; ++i) {
                        asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
                                     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
                        trace_tlbie(kvm->arch.lpid, 0, rbvalues[i],
                                kvm->arch.lpid, 0, 0, 0);
                }
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                if (need_sync)
                        asm volatile("ptesync" : : : "memory");
                for (i = 0; i < npages; ++i) {
                        asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
                                     "r" (rbvalues[i]), "r" (0));
                        trace_tlbie(kvm->arch.lpid, 1, rbvalues[i],
                                0, 0, 0, 0);
                }
                asm volatile("ptesync" : : : "memory");
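                /*
                 * The tlbiel path only invalidates translations on this
                 * core; global_invalidates() has already marked all other
                 * cores in need_tlb_flush, so they will flush this LPID's
                 * entries before they next enter the guest.
                 */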
        }
}

long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
                        unsigned long pte_index, unsigned long avpn,
                        unsigned long *hpret)
{
        __be64 *hpte;
        unsigned long v, r, rb;
        struct revmap_entry *rev;
        u64 pte, orig_pte, pte_r;

        if (kvm_is_radix(kvm))
                return H_FUNCTION;
        if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
                return H_PARAMETER;
        hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        pte = orig_pte = be64_to_cpu(hpte[0]);
        pte_r = be64_to_cpu(hpte[1]);
        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                pte = hpte_new_to_old_v(pte, pte_r);
                pte_r = hpte_new_to_old_r(pte_r);
        }
        if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
                __unlock_hpte(hpte, orig_pte);
                return H_NOT_FOUND;
        }

        rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
        v = pte & ~HPTE_V_HVLOCK;
        if (v & HPTE_V_VALID) {
                hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
                rb = compute_tlbie_rb(v, pte_r, pte_index);
                do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
                /*
                 * The reference (R) and change (C) bits in a HPT
                 * entry can be set by hardware at any time up until
                 * the HPTE is invalidated and the TLB invalidation
                 * sequence has completed. This means that when
                 * removing a HPTE, we need to re-read the HPTE after
                 * the invalidation sequence has completed in order to
                 * obtain reliable values of R and C.
                 */
                remove_revmap_chain(kvm, pte_index, rev, v,
                                    be64_to_cpu(hpte[1]));
        }
        r = rev->guest_rpte & ~HPTE_GR_RESERVED;
        note_hpte_modification(kvm, rev);
        unlock_hpte(hpte, 0);

        if (is_mmio_hpte(v, pte_r))
                atomic64_inc(&kvm->arch.mmio_update);

        if (v & HPTE_V_ABSENT)
                v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
        hpret[0] = v;
        hpret[1] = r;
        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn)
{
        return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
                                  &vcpu->arch.gpr[4]);
}
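/*
 * H_BULK_REMOVE passes up to four removal requests in the guest's
 * registers.  As decoded below, each even argument doubleword carries
 * the request type and the avpn/andcond flags in its top byte and the
 * HPTE index in its low 56 bits, while the following odd doubleword
 * holds the AVPN value to match; the return code for each request is
 * written back into the top byte of the even doubleword.
 */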
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
        __be64 *hp, *hptes[4];
        unsigned long tlbrb[4];
        long int i, j, k, n, found, indexes[4];
        unsigned long flags, req, pte_index, rcbits;
        int global;
        long int ret = H_SUCCESS;
        struct revmap_entry *rev, *revs[4];
        u64 hp0, hp1;

        if (kvm_is_radix(kvm))
                return H_FUNCTION;
        global = global_invalidates(kvm, 0);
        for (i = 0; i < 4 && ret == H_SUCCESS; ) {
                n = 0;
                for (; i < 4; ++i) {
                        j = i * 2;
                        pte_index = args[j];
                        flags = pte_index >> 56;
                        pte_index &= ((1ul << 56) - 1);
                        req = flags >> 6;
                        flags &= 3;
                        if (req == 3) {         /* no more requests */
                                i = 4;
                                break;
                        }
                        if (req != 1 || flags == 3 ||
                            pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
                                /* parameter error */
                                args[j] = ((0xa0 | flags) << 56) + pte_index;
                                ret = H_PARAMETER;
                                break;
                        }
                        hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
                        /* to avoid deadlock, don't spin except for the first */
                        if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
                                if (n)
                                        break;
                                while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
                                        cpu_relax();
                        }
                        found = 0;
                        hp0 = be64_to_cpu(hp[0]);
                        hp1 = be64_to_cpu(hp[1]);
                        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                                hp0 = hpte_new_to_old_v(hp0, hp1);
                                hp1 = hpte_new_to_old_r(hp1);
                        }
                        if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
                                switch (flags & 3) {
                                case 0:         /* absolute */
                                        found = 1;
                                        break;
                                case 1:         /* andcond */
                                        if (!(hp0 & args[j + 1]))
                                                found = 1;
                                        break;
                                case 2:         /* AVPN */
                                        if ((hp0 & ~0x7fUL) == args[j + 1])
                                                found = 1;
                                }
                        }
                        if (!found) {
                                hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
                                args[j] = ((0x90 | flags) << 56) + pte_index;
                                continue;
                        }
                        args[j] = ((0x80 | flags) << 56) + pte_index;
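                        /*
                         * 0x80 in the top byte marks this request as
                         * successfully processed (0x90 above means the HPTE
                         * was not found or did not match, 0xa0 means a
                         * parameter error).
                         */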
                        rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
                        note_hpte_modification(kvm, rev);
                        if (!(hp0 & HPTE_V_VALID)) {
                                /* insert R and C bits from PTE */
                                rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                                args[j] |= rcbits << (56 - 5);
                                hp[0] = 0;
                                if (is_mmio_hpte(hp0, hp1))
                                        atomic64_inc(&kvm->arch.mmio_update);
                                continue;
                        }

                        /* leave it locked */
                        hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
                        tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index);
                        indexes[n] = j;
                        hptes[n] = hp;
                        revs[n] = rev;
                        ++n;
                }

                if (!n)
                        break;

                /* Now that we've collected a batch, do the tlbies */
                do_tlbies(kvm, tlbrb, n, global, true);

                /* Read PTE low words after tlbie to get final R/C values */
                for (k = 0; k < n; ++k) {
                        j = indexes[k];
                        pte_index = args[j] & ((1ul << 56) - 1);
                        hp = hptes[k];
                        rev = revs[k];
                        remove_revmap_chain(kvm, pte_index, rev,
                                be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
                        rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                        args[j] |= rcbits << (56 - 5);
                        __unlock_hpte(hp, 0);
                }
        }

        return ret;
}
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        __be64 *hpte;
        struct revmap_entry *rev;
        unsigned long v, r, rb, mask, bits;
        u64 pte_v, pte_r;

        if (kvm_is_radix(kvm))
                return H_FUNCTION;
        if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
                return H_PARAMETER;

        hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        v = pte_v = be64_to_cpu(hpte[0]);
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1]));
        if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) {
                __unlock_hpte(hpte, pte_v);
                return H_NOT_FOUND;
        }

        pte_r = be64_to_cpu(hpte[1]);
        bits = (flags << 55) & HPTE_R_PP0;
        bits |= (flags << 48) & HPTE_R_KEY_HI;
        bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        /* Update guest view of 2nd HPTE dword */
        mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                HPTE_R_KEY_HI | HPTE_R_KEY_LO;
        rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
        if (rev) {
                r = (rev->guest_rpte & ~mask) | bits;
                rev->guest_rpte = r;
                note_hpte_modification(kvm, rev);
        }

        /* Update HPTE */
        if (v & HPTE_V_VALID) {
                /*
                 * If the page is valid, don't let it transition from
                 * readonly to writable. If it should be writable, we'll
                 * take a trap and let the page fault code sort it out.
                 */
                r = (pte_r & ~mask) | bits;
                if (hpte_is_writable(r) && !hpte_is_writable(pte_r))
                        r = hpte_make_readonly(r);
                /* If the PTE is changing, invalidate it first */
                if (r != pte_r) {
                        rb = compute_tlbie_rb(v, r, pte_index);
                        hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) |
                                              HPTE_V_ABSENT);
                        do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
                                  true);
                        /* Don't lose R/C bit updates done by hardware */
                        r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
                        hpte[1] = cpu_to_be64(r);
                }
        }
        unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK);
        asm volatile("ptesync" : : : "memory");
        if (is_mmio_hpte(v, pte_r))
                atomic64_inc(&kvm->arch.mmio_update);

        return H_SUCCESS;
}
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        __be64 *hpte;
        unsigned long v, r;
        int i, n = 1;
        struct revmap_entry *rev = NULL;

        if (kvm_is_radix(kvm))
                return H_FUNCTION;
        if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
        rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
                hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
                v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
                r = be64_to_cpu(hpte[1]);
                if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                        v = hpte_new_to_old_v(v, r);
                        r = hpte_new_to_old_r(r);
                }
                if (v & HPTE_V_ABSENT) {
                        v &= ~HPTE_V_ABSENT;
                        v |= HPTE_V_VALID;
                }
                if (v & HPTE_V_VALID) {
                        r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
                        r &= ~HPTE_GR_RESERVED;
                }
                vcpu->arch.gpr[4 + i * 2] = v;
                vcpu->arch.gpr[5 + i * 2] = r;
        }
        return H_SUCCESS;
}
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        __be64 *hpte;
        unsigned long v, r, gr;
        struct revmap_entry *rev;
        unsigned long *rmap;
        long ret = H_NOT_FOUND;

        if (kvm_is_radix(kvm))
                return H_FUNCTION;
        if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
                return H_PARAMETER;

        rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
        hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        v = be64_to_cpu(hpte[0]);
        r = be64_to_cpu(hpte[1]);
        if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
                goto out;

        gr = rev->guest_rpte;
        if (rev->guest_rpte & HPTE_R_R) {
                rev->guest_rpte &= ~HPTE_R_R;
                note_hpte_modification(kvm, rev);
        }
        if (v & HPTE_V_VALID) {
                gr |= r & (HPTE_R_R | HPTE_R_C);
                if (r & HPTE_R_R) {
                        kvmppc_clear_ref_hpte(kvm, hpte, pte_index);
                        rmap = revmap_for_hpte(kvm, v, gr);
                        if (rmap) {
                                lock_rmap(rmap);
                                *rmap |= KVMPPC_RMAP_REFERENCED;
                                unlock_rmap(rmap);
                        }
                }
        }
        vcpu->arch.gpr[4] = gr;
        ret = H_SUCCESS;
 out:
        unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
        return ret;
}
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        __be64 *hpte;
        unsigned long v, r, gr;
        struct revmap_entry *rev;
        unsigned long *rmap;
        long ret = H_NOT_FOUND;

        if (kvm_is_radix(kvm))
                return H_FUNCTION;
        if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
                return H_PARAMETER;

        rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
        hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        v = be64_to_cpu(hpte[0]);
        r = be64_to_cpu(hpte[1]);
        if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
                goto out;

        gr = rev->guest_rpte;
        if (gr & HPTE_R_C) {
                rev->guest_rpte &= ~HPTE_R_C;
                note_hpte_modification(kvm, rev);
        }
        if (v & HPTE_V_VALID) {
                /* need to make it temporarily absent so C is stable */
                hpte[0] |= cpu_to_be64(HPTE_V_ABSENT);
                kvmppc_invalidate_hpte(kvm, hpte, pte_index);
                r = be64_to_cpu(hpte[1]);
                gr |= r & (HPTE_R_R | HPTE_R_C);
                if (r & HPTE_R_C) {
                        unsigned long psize = hpte_page_size(v, r);
                        hpte[1] = cpu_to_be64(r & ~HPTE_R_C);
                        eieio();
                        rmap = revmap_for_hpte(kvm, v, gr);
                        if (rmap) {
                                lock_rmap(rmap);
                                *rmap |= KVMPPC_RMAP_CHANGED;
                                kvmppc_update_rmap_change(rmap, psize);
                                unlock_rmap(rmap);
                        }
                }
        }
        vcpu->arch.gpr[4] = gr;
        ret = H_SUCCESS;
 out:
        unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
        return ret;
}
void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
                        unsigned long pte_index)
{
        unsigned long rb;
        u64 hp0, hp1;

        hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
        hp0 = be64_to_cpu(hptep[0]);
        hp1 = be64_to_cpu(hptep[1]);
        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                hp0 = hpte_new_to_old_v(hp0, hp1);
                hp1 = hpte_new_to_old_r(hp1);
        }
        rb = compute_tlbie_rb(hp0, hp1, pte_index);
        do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
                        unsigned long pte_index)
{
        unsigned long rb;
        unsigned char rbyte;
        u64 hp0, hp1;

        hp0 = be64_to_cpu(hptep[0]);
        hp1 = be64_to_cpu(hptep[1]);
        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                hp0 = hpte_new_to_old_v(hp0, hp1);
                hp1 = hpte_new_to_old_r(hp1);
        }
        rb = compute_tlbie_rb(hp0, hp1, pte_index);
        rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
        /* modify only the second-last byte, which contains the ref bit */
        *((char *)hptep + 14) = rbyte;
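        /*
         * Only byte 14 of the HPTE is written here, presumably so that a
         * concurrent hardware update of the C bit (which lives in the
         * neighbouring byte of the second doubleword) cannot be clobbered
         * by this store.
         */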
        do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
static int slb_base_page_shift[4] = {
        24,     /* 16M */
        16,     /* 64k */
        34,     /* 16G */
        20,     /* 1M, unsupported */
};

static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu,
                unsigned long eaddr, unsigned long slb_v, long mmio_update)
{
        struct mmio_hpte_cache_entry *entry = NULL;
        unsigned int pshift;
        int i;

        for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) {
                entry = &vcpu->arch.mmio_cache.entry[i];
                if (entry->mmio_update == mmio_update) {
                        pshift = entry->slb_base_pshift;
                        if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&
                            entry->slb_v == slb_v)
                                return entry;
                }
        }
        return NULL;
}

static struct mmio_hpte_cache_entry *
                        next_mmio_cache_entry(struct kvm_vcpu *vcpu)
{
        unsigned int index = vcpu->arch.mmio_cache.index;

        vcpu->arch.mmio_cache.index++;
        if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE)
                vcpu->arch.mmio_cache.index = 0;
        return &vcpu->arch.mmio_cache.entry[index];
}
/* When called from virtual mode, this function must be protected by
 * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
 * can trigger a deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                              unsigned long valid)
{
        unsigned int i;
        unsigned int pshift;
        unsigned long somask;
        unsigned long vsid, hash;
        unsigned long avpn;
        __be64 *hpte;
        unsigned long mask, val;
        unsigned long v, r, orig_v;

        /* Get page shift, work out hash and AVPN etc. */
        mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
        val = 0;
        pshift = 12;
        if (slb_v & SLB_VSID_L) {
                mask |= HPTE_V_LARGE;
                val |= HPTE_V_LARGE;
                pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
        }
        if (slb_v & SLB_VSID_B_1T) {
                somask = (1UL << 40) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
                vsid ^= vsid << 25;
        } else {
                somask = (1UL << 28) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
        }
        hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);
        avpn = slb_v & ~(somask >> 16); /* also includes B */
        avpn |= (eaddr & somask) >> 16;

        if (pshift >= 24)
                avpn &= ~((1UL << (pshift - 16)) - 1);
        else
                avpn &= ~0x7fUL;
        val |= avpn;

        for (;;) {
                hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7));

                for (i = 0; i < 16; i += 2) {
                        /* Read the PTE racily */
                        v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
                        if (cpu_has_feature(CPU_FTR_ARCH_300))
                                v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1]));

                        /* Check valid/absent, hash, segment size and AVPN */
                        if (!(v & valid) || (v & mask) != val)
                                continue;

                        /* Lock the PTE and read it under the lock */
                        while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
                                cpu_relax();
                        v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
                        r = be64_to_cpu(hpte[i+1]);
                        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                                v = hpte_new_to_old_v(v, r);
                                r = hpte_new_to_old_r(r);
                        }

                        /*
                         * Check the HPTE again, including base page size
                         */
                        if ((v & valid) && (v & mask) == val &&
                            hpte_base_page_size(v, r) == (1ul << pshift))
                                /* Return with the HPTE still locked */
                                return (hash << 3) + (i >> 1);

                        __unlock_hpte(&hpte[i], orig_v);
                }

                if (val & HPTE_V_SECONDARY)
                        break;
                val |= HPTE_V_SECONDARY;
                hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt);
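                /*
                 * The XOR with kvmppc_hpt_mask() above converts the primary
                 * hash into the secondary hash, which the HPT architecture
                 * defines as the ones' complement of the primary.
                 */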
        }
        return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault is
 * none of these (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status, bool data)
{
        struct kvm *kvm = vcpu->kvm;
        long int index;
        unsigned long v, r, gr, orig_v;
        __be64 *hpte;
        unsigned long valid;
        struct revmap_entry *rev;
        unsigned long pp, key;
        struct mmio_hpte_cache_entry *cache_entry = NULL;
        long mmio_update = 0;

        /* For protection fault, expect to find a valid HPTE */
        valid = HPTE_V_VALID;
        if (status & DSISR_NOHPTE) {
                valid |= HPTE_V_ABSENT;
                mmio_update = atomic64_read(&kvm->arch.mmio_update);
                cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
        }
        if (cache_entry) {
                index = cache_entry->pte_index;
                v = cache_entry->hpte_v;
                r = cache_entry->hpte_r;
                gr = cache_entry->rpte;
        } else {
                index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
                if (index < 0) {
                        if (status & DSISR_NOHPTE)
                                return status;  /* there really was no HPTE */
                        return 0;               /* for prot fault, HPTE disappeared */
                }
                hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
                v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
                r = be64_to_cpu(hpte[1]);
                if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                        v = hpte_new_to_old_v(v, r);
                        r = hpte_new_to_old_r(r);
                }
                rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
                gr = rev->guest_rpte;

                unlock_hpte(hpte, orig_v);
        }

        /* For not found, if the HPTE is valid by now, retry the instruction */
        if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
                return 0;

        /* Check access permissions to the page */
        pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
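        /*
         * MSR_PR set means the access was made in problem (user) state, so
         * the SLB Kp key-protection bit applies; otherwise Ks applies.
         */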
        status &= ~DSISR_NOHPTE;        /* DSISR_NOHPTE == SRR1_ISI_NOPT */
        if (!data) {
                if (gr & (HPTE_R_N | HPTE_R_G))
                        return status | SRR1_ISI_N_OR_G;
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | SRR1_ISI_PROT;
        } else if (status & DSISR_ISSTORE) {
                /* check write permission */
                if (!hpte_write_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        } else {
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        }

        /* Check storage key, if applicable */
        if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
                unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
                if (status & DSISR_ISSTORE)
                        perm >>= 1;
                if (perm & 1)
                        return status | DSISR_KEYFAULT;
        }

        /* Save HPTE info for virtual-mode handler */
        vcpu->arch.pgfault_addr = addr;
        vcpu->arch.pgfault_index = index;
        vcpu->arch.pgfault_hpte[0] = v;
        vcpu->arch.pgfault_hpte[1] = r;
        vcpu->arch.pgfault_cache = cache_entry;

        /* Check the storage key to see if it is possibly emulated MMIO */
        if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
            (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) {
                if (!cache_entry) {
                        unsigned int pshift = 12;
                        unsigned int pshift_index;

                        if (slb_v & SLB_VSID_L) {
                                pshift_index = ((slb_v & SLB_VSID_LP) >> 4);
                                pshift = slb_base_page_shift[pshift_index];
                        }
                        cache_entry = next_mmio_cache_entry(vcpu);
                        cache_entry->eaddr = addr;
                        cache_entry->slb_base_pshift = pshift;
                        cache_entry->pte_index = index;
                        cache_entry->hpte_v = v;
                        cache_entry->hpte_r = r;
                        cache_entry->rpte = gr;
                        cache_entry->slb_v = slb_v;
                        cache_entry->mmio_update = mmio_update;
                }
                if (data && (vcpu->arch.shregs.msr & MSR_IR))
                        return -2;      /* MMIO emulation - load instr word */
        }

        return -1;              /* send fault up to host kernel mode */
}