// SPDX-License-Identifier: GPL-2.0
/*
 *  KVM guest address space mapping code
 *
 *    Copyright IBM Corp. 2007, 2016, 2018
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 David Hildenbrand <david@redhat.com>
 *		 Janosch Frank <frankja@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL

/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}

/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
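
/*
 * Illustrative usage sketch (not part of this file): how a hypothetical
 * caller would set up and later tear down a guest address space. The 4 TB
 * limit is an arbitrary example value.
 *
 *	struct gmap *g;
 *
 *	g = gmap_create(current->mm, (1UL << 42) - 1);
 *	if (!g)
 *		return -ENOMEM;
 *	...	// run the guest, e.g. after gmap_enable(g)
 *	gmap_remove(g);
 */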

static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
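
/*
 * Illustrative refcounting sketch (hypothetical caller that keeps a
 * long-lived reference to a gmap owned by somebody else):
 *
 *	sg = gmap_get(gmap);	// take an extra reference
 *	...			// use sg independently of the creator
 *	gmap_put(sg);		// drop it; frees the gmap at refcount zero
 */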

/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. NULL if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);

/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
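
/*
 * Worked example for the arithmetic above (illustrative numbers): a segment
 * table is a 16 KiB crst table of PTRS_PER_PMD (2048) eight-byte entries,
 * each mapping a PMD_SIZE (1 MiB) segment. For the 5th entry of a table
 * whose head page->index was set to 0x80000000 by gmap_alloc_table():
 *
 *	offset = (5 & 2047) * PMD_SIZE = 0x500000
 *	mask aligns the entry pointer down to the 16 KiB table origin
 *	returned gaddr = 0x80000000 + 0x500000 = 0x80500000
 */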

/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
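
/*
 * Illustrative sketch (hypothetical addresses): mirror 8 MiB of the host
 * process starting at host address 0x20000000 into the guest at address 0.
 * All three values must be PMD_SIZE (1 MiB) aligned:
 *
 *	rc = gmap_map_segment(g, 0x20000000UL, 0x0UL, 0x800000UL);
 *	...
 *	rc = gmap_unmap_segment(g, 0x0UL, 0x800000UL);
 */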

/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
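
/*
 * Illustrative sketch: the return value doubles as address or negative
 * error code, so it must be checked with IS_ERR_VALUE() before use:
 *
 *	vmaddr = gmap_translate(g, gaddr);
 *	if (IS_ERR_VALUE(vmaddr))
 *		return vmaddr;	// -EFAULT: no segment mapped at gaddr
 */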

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}

static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
			   unsigned long gaddr);

/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	u64 unprot;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* Are we allowed to use huge pages? */
	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc) {
			if (pmd_large(*pmd)) {
				*table = (pmd_val(*pmd) &
					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
					| _SEGMENT_ENTRY_GMAP_UC;
			} else
				*table = pmd_val(*pmd) &
					_SEGMENT_ENTRY_HARDWARE_BITS;
		}
	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
		unprot = (u64)*table;
		unprot &= ~_SEGMENT_ENTRY_PROTECT;
		unprot |= _SEGMENT_ENTRY_GMAP_UC;
		gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
	}
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}

/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In case fixup_user_fault unlocked the mmap_sem during the fault-in,
	 * redo __gmap_translate to not race with a map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
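
/*
 * Illustrative sketch: resolving a guest write fault (the fault address
 * would come from the guest exception state; names are hypothetical):
 *
 *	rc = gmap_fault(g, gaddr, FAULT_FLAG_WRITE);
 *	if (rc == -EFAULT)
 *		...	// gaddr was never mapped via gmap_map_segment()
 *	else if (rc)
 *		...	// e.g. -ENOMEM, fail or retry the guest access
 */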

/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		/*
		 * We do not discard pages that are backed by
		 * hugetlbfs, so we don't have to refault them.
		 */
		if (is_vm_hugetlb_page(vma))
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);

static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}

/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
		return NULL;
	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}
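
/*
 * Example of the @level convention (illustrative): for a region-1 type gmap
 * the same guest address can be resolved to different table depths:
 *
 *	entry = gmap_table_walk(gmap, gaddr, 4);	// region-1 entry
 *	entry = gmap_table_walk(gmap, gaddr, 1);	// segment entry
 *	entry = gmap_table_walk(gmap, gaddr, 0);	// page table entry
 *
 * A NULL result means the walk stopped early: an invalid entry, an address
 * above the asce limit, or a removed shadow gmap.
 */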

/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	BUG_ON(gmap_is_shadow(gmap));
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID)
		return NULL;
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	if (ptl)
		spin_unlock(ptl);
}

/**
 * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
 *		      and return the pmd pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 *
 * Returns a pointer to the pmd for a guest address, or NULL
 */
static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
{
	pmd_t *pmdp;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);

	if (!pmdp || pmd_none(*pmdp)) {
		spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}

	/* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
	if (!pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
	return pmdp;
}

/**
 * gmap_pmd_op_end - release the guest_table_lock if needed
 * @gmap: pointer to the guest mapping meta data structure
 * @pmdp: pointer to the pmd
 */
static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
{
	if (pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
}

/*
 * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
 * @pmdp: pointer to the pmd to be protected
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns:
 * 0 if successfully protected
 * -EAGAIN if a fixup is needed
 * -EINVAL if unsupported notifier bits have been specified
 *
 * Expected to be called with sg->mm->mmap_sem in read and
 * guest_table_lock held.
 */
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
	pmd_t new = *pmdp;

	/* Fixup needed */
	if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
		return -EAGAIN;

	if (prot == PROT_NONE && !pmd_i) {
		pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (prot == PROT_READ && !pmd_p) {
		pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
		pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (bits & GMAP_NOTIFY_MPROT)
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;

	/* Shadow GMAP protection needs split PMDs */
	if (bits & GMAP_NOTIFY_SHADOW)
		return -EINVAL;

	return 0;
}

/**
 * gmap_protect_pte - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd associated with the pte
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EAGAIN if a fixup is needed.
 *
 * Expected to be called with sg->mm->mmap_sem in read
 */
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int rc;
	pte_t *ptep;
	spinlock_t *ptl = NULL;
	unsigned long pbits = 0;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return -EAGAIN;

	ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
	if (!ptep)
		return -ENOMEM;

	pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
	/* Protect and unlock. */
	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
	gmap_pte_op_end(ptl);
	return rc;
}

/**
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr, dist;
	pmd_t *pmdp;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	while (len) {
		rc = -EAGAIN;
		pmdp = gmap_pmd_op_walk(gmap, gaddr);
		if (pmdp) {
			if (!pmd_large(*pmdp)) {
				rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					len -= PAGE_SIZE;
					gaddr += PAGE_SIZE;
				}
			} else {
				rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
					len = len < dist ? 0 : len - dist;
					gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
				}
			}
			gmap_pmd_op_end(gmap, pmdp);
		}
		if (rc) {
			if (rc == -EINVAL)
				return rc;

			/* -EAGAIN, fixup of userspace mm and gmap */
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
		}
	}
	return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *                        call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
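
/*
 * Illustrative sketch of the notifier machinery (my_notifier is a
 * hypothetical callback): register a notifier block once, then
 * write-protect a guest page; the callback fires when the pte changes:
 *
 *	static struct gmap_notifier nb = { .notifier_call = my_notifier };
 *
 *	gmap_register_pte_notifier(&nb);
 *	rc = gmap_mprotect_notify(g, gaddr, PAGE_SIZE, PROT_READ);
 */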

/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *                   absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed. -EINVAL if called on a gmap
 * shadow.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	if (gmap_is_shadow(gmap))
		return -EINVAL;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
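
/*
 * Illustrative sketch (hypothetical gaddr): peek at a guest table word
 * without marking the backing page referenced:
 *
 *	unsigned long val;
 *
 *	rc = gmap_read_table(g, gaddr, &val);
 *	if (!rc)
 *		...	// val holds the 8 bytes at guest absolute gaddr
 */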

/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}

/**
 * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}

#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
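
/*
 * Example invocation, as used by the unshadow helpers below, where sto is
 * the segment table origin computed back from an entry pointer:
 *
 *	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
 *
 * The type bits tell IDTE which part of @vaddr indexes the table, so only
 * the TLB entries derived from that single entry are flushed.
 */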

/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
		pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
	sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}

/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
	r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
	r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}

/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}

/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}

/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}

/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *                     given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);

/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred to by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
				PROT_READ, GMAP_NOTIFY_SHADOW);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
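
/*
 * Illustrative VSIE-style sketch (hypothetical values): asce would be the
 * guest-supplied ASCE for the nested guest, edat_level its EDAT capability:
 *
 *	sg = gmap_shadow(parent, asce, edat_level);
 *	if (IS_ERR(sg))
 *		return PTR_ERR(sg);	// -ENOMEM, -EAGAIN or -EFAULT
 *	...				// use sg until it gets invalidated
 *	gmap_put(sg);			// drop the reference taken above
 */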

/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r2t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);

/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r3t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);

/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_sgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);

/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
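
/*
 * Illustrative sketch: a caller typically tries the lookup first and only
 * on -EAGAIN reads the guest table and instantiates the shadow page table
 * via gmap_shadow_pgt() below (variables are hypothetical):
 *
 *	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
 *	if (rc == -EAGAIN)
 *		rc = gmap_shadow_pgt(sg, saddr, pgt, fake);
 */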
/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
			      (unsigned long) s_pgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
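
/*
 * A condensed restatement of the install protocol used above (no new
 * behavior): the shadow entry is linked while still marked invalid,
 * the parent table is write-protected, and only then is the invalid
 * bit cleared, so no entry becomes usable before its parent is
 * protected.
 *
 *	*table = s_pgt | ... | _SEGMENT_ENTRY_INVALID;	// link, invalid
 *	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
 *	if (!rc)
 *		*table &= ~_SEGMENT_ENTRY_INVALID;	// now usable
 */
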
/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int prot;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
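
/*
 * Note with a sketch (illustrative): the while(1) loop above is the
 * usual gmap fixup pattern - if the parent pte cannot be walked, fault
 * it in with the protection derived from the guest pte and try again:
 *
 *	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
 *	rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);	// then retry
 */
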
/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: affected host virtual address
 * @gaddr: affected guest address
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long gaddr)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long start, end, bits, raddr;

	BUG_ON(!gmap_is_shadow(sg));

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree for one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}
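
/*
 * Worked example (illustrative): rmap->raddr packs the shadow level
 * into the low bits of an aligned address. For a page table shadowed
 * at saddr, gmap_shadow_pgt() stores
 *
 *	rmap->raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
 *
 * so the loop above recovers both pieces losslessly:
 *
 *	bits  = rmap->raddr & _SHADOW_RMAP_MASK;  // _SHADOW_RMAP_SEGMENT
 *	raddr = rmap->raddr ^ bits;               // saddr & _SEGMENT_MASK
 */
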
/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr = 0;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (PAGE_SIZE / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (!table)
			continue;

		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, gaddr);
			spin_unlock(&gmap->shadow_lock);
		}
		if (bits & PGSTE_IN_BIT)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
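
/*
 * Worked example for the offset arithmetic above (s390 page tables
 * hold 256 ptes of 8 bytes, i.e. 2K): for a pte pointer ending in
 * 0x880, the byte offset within its 2K table is
 *
 *	offset = 0x880 & (255 * 8)	= 0x80	-> pte index 16
 *	offset = 0x80 * (PAGE_SIZE / 8)	= 16 * PAGE_SIZE
 *
 * which is the byte offset of that pte's page within the 1M guest
 * segment, so gaddr = segment base + offset.
 */
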
static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
			     unsigned long gaddr)
{
	pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
	gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
}

/**
 * gmap_pmdp_xchg - exchange a gmap pmd with another
 * @gmap: pointer to the guest address space structure
 * @pmdp: pointer to the pmd entry
 * @new: replacement entry
 * @gaddr: the affected guest address
 *
 * This function is assumed to be called with the guest_table_lock
 * held.
 */
static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
			   unsigned long gaddr)
{
	gaddr &= HPAGE_MASK;
	pmdp_notify_gmap(gmap, pmdp, gaddr);
	pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN;
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
			    IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
	*pmdp = new;
}

static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
			    int purge)
{
	pmd_t *pmdp;
	struct gmap *gmap;
	unsigned long gaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
						  vmaddr >> PMD_SHIFT);
		if (pmdp) {
			gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
			pmdp_notify_gmap(gmap, pmdp, gaddr);
			WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
						   _SEGMENT_ENTRY_GMAP_UC));
			if (purge)
				__pmdp_csp(pmdp);
			pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
		}
		spin_unlock(&gmap->guest_table_lock);
	}
	rcu_read_unlock();
}

/**
 * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
 *                        flushing
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
{
	gmap_pmdp_clear(mm, vmaddr, 0);
}
EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);

/**
 * gmap_pmdp_csp - csp all affected guest pmd entries
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
{
	gmap_pmdp_clear(mm, vmaddr, 1);
}
EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
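
/*
 * Usage note (illustrative): both wrappers funnel into
 * gmap_pmdp_clear() and differ only in whether the entry is purged
 * with CSP before being marked empty:
 *
 *	gmap_pmdp_invalidate(mm, vmaddr);	// clear, no CSP purge
 *	gmap_pmdp_csp(mm, vmaddr);		// clear via compare&swap&purge
 */
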
/**
 * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *entry, gaddr;
	struct gmap *gmap;
	pmd_t *pmdp;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		entry = radix_tree_delete(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (entry) {
			pmdp = (pmd_t *)entry;
			gaddr = __gmap_segment_gaddr(entry);
			pmdp_notify_gmap(gmap, pmdp, gaddr);
			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
					   _SEGMENT_ENTRY_GMAP_UC));
			if (MACHINE_HAS_TLB_GUEST)
				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
					    gmap->asce, IDTE_LOCAL);
			else if (MACHINE_HAS_IDTE)
				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
			*entry = _SEGMENT_ENTRY_EMPTY;
		}
		spin_unlock(&gmap->guest_table_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);

/**
 * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *entry, gaddr;
	struct gmap *gmap;
	pmd_t *pmdp;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		entry = radix_tree_delete(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (entry) {
			pmdp = (pmd_t *)entry;
			gaddr = __gmap_segment_gaddr(entry);
			pmdp_notify_gmap(gmap, pmdp, gaddr);
			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
					   _SEGMENT_ENTRY_GMAP_UC));
			if (MACHINE_HAS_TLB_GUEST)
				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
					    gmap->asce, IDTE_GLOBAL);
			else if (MACHINE_HAS_IDTE)
				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
			else
				__pmdp_csp(pmdp);
			*entry = _SEGMENT_ENTRY_EMPTY;
		}
		spin_unlock(&gmap->guest_table_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);

/**
 * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
 * @gmap: pointer to guest address space
 * @pmdp: pointer to the pmd to be tested
 * @gaddr: virtual address in the guest address space
 *
 * This function is assumed to be called with the guest_table_lock
 * held.
 */
bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
				   unsigned long gaddr)
{
	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return false;

	/* Already protected memory, which did not change, is clean */
	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
	    !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
		return false;

	/* Clear UC indication and reset protection */
	pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
	gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
	return true;
}

/**
 * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
 * @gmap: pointer to guest address space
 * @bitmap: dirty bitmap for this pmd
 * @gaddr: virtual address in the guest address space
 * @vmaddr: virtual address in the host address space
 *
 * This function is assumed to be called with the guest_table_lock
 * held.
 */
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
			     unsigned long gaddr, unsigned long vmaddr)
{
	int i;
	pmd_t *pmdp;
	pte_t *ptep;
	spinlock_t *ptl;

	pmdp = gmap_pmd_op_walk(gmap, gaddr);
	if (!pmdp)
		return;

	if (pmd_large(*pmdp)) {
		if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
			bitmap_fill(bitmap, _PAGE_ENTRIES);
	} else {
		for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
			ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
			if (!ptep)
				continue;
			if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
				set_bit(i, bitmap);
			spin_unlock(ptl);
		}
	}
	gmap_pmd_op_end(gmap, pmdp);
}
EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
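
/*
 * Usage sketch (illustrative; the real caller sits in KVM's dirty-log
 * code): harvest one segment's worth of dirty bits. "kvm" and the
 * mark_page_dirty() call are assumptions, not taken from this file.
 *
 *	unsigned long bitmap[4] = { 0 };
 *	int i;
 *
 *	gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
 *	for_each_set_bit(i, bitmap, _PAGE_ENTRIES)
 *		mark_page_dirty(kvm, (gaddr >> PAGE_SHIFT) + i);
 */
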
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}

/*
 * Remove all empty zero pages from the mapping for lazy refaulting
 * - This must be called after mm->context.has_pgste is set, to avoid
 *   future creation of zero pages
 * - This must be called after THP was enabled
 */
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
			    unsigned long end, struct mm_walk *walk)
{
	unsigned long addr;

	for (addr = start; addr != end; addr += PAGE_SIZE) {
		pte_t *ptep;
		spinlock_t *ptl;

		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (is_zero_pfn(pte_pfn(*ptep)))
			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
		pte_unmap_unlock(ptep, ptl);
	}
	return 0;
}

static inline void zap_zero_pages(struct mm_struct *mm)
{
	struct mm_walk walk = { .pmd_entry = __zap_zero_pages };

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
}

/*
 * switch on pgstes for the current userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? If yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	zap_zero_pages(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
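
/*
 * Usage sketch (illustrative): KVM enables pgstes once while creating
 * a VM; failure means the process was set up with 2K page tables.
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;	// cannot run SIE in this mm
 */
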
/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
				  unsigned long next, struct mm_walk *walk)
{
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
				      unsigned long hmask, unsigned long next,
				      struct mm_walk *walk)
{
	pmd_t *pmd = (pmd_t *)pte;
	unsigned long start, end;
	struct page *page = pmd_page(*pmd);

	/*
	 * The write check makes sure we do not set a key on shared
	 * memory. This is needed as the walker does not differentiate
	 * between actual guest memory and the process executable or
	 * shared libraries.
	 */
	if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
	    !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
		return 0;

	start = pmd_val(*pmd) & HPAGE_MASK;
	end = start + HPAGE_SIZE - 1;
	__storage_key_init_range(start, end);
	set_bit(PG_arch_1, &page->flags);
	return 0;
}

int s390_enable_skey(void)
{
	struct mm_walk walk = {
		.hugetlb_entry = __s390_enable_skey_hugetlb,
		.pte_entry = __s390_enable_skey_pte,
	};
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_uses_skeys(mm))
		goto out_up;

	mm->context.uses_skeys = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.uses_skeys = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
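
/*
 * Usage sketch (illustrative): storage keys are enabled lazily, e.g.
 * when a guest first executes a key instruction and the intercept
 * handler calls:
 *
 *	rc = s390_enable_skey();	// -ENOMEM if KSM unmerge fails
 */
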
/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
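
/*
 * Usage sketch (illustrative): called when userspace requests a CMMA
 * reset, e.g. from KVM's memory attribute handling ("kvm" is a
 * hypothetical handle here):
 *
 *	s390_reset_cmma(kvm->arch.gmap->mm);
 */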