// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/pkeys.h>
#include <linux/debugfs.h>
#include <misc/cxl-base.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>
#include <asm/kexec.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, i.e., a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will have
 * handled those for us; we additionally deal with missing execute
 * permission here on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp, pmd_t entry, int dirty)
{
        int changed;
#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
                /*
                 * We can use MMU_PAGE_2M here, because only the radix
                 * path looks at the psize.
                 */
                __ptep_set_access_flags(vma, pmdp_ptep(pmdp),
                                        pmd_pte(entry), address, MMU_PAGE_2M);
        }
        return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pud_t *pudp, pud_t entry, int dirty)
{
        int changed;
#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pud_devmap(*pudp));
        assert_spin_locked(pud_lockptr(vma->vm_mm, pudp));
#endif
        changed = !pud_same(*(pudp), entry);
        if (changed) {
                /*
                 * We can use MMU_PAGE_1G here, because only the radix
                 * path looks at the psize.
                 */
                __ptep_set_access_flags(vma, pudp_ptep(pudp),
                                        pud_pte(entry), address, MMU_PAGE_1G);
        }
        return changed;
}
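
/*
 * Test and clear the accessed (young) bit in a huge pmd/pud. These simply
 * forward to the __pmdp/__pudp helpers, which atomically clear
 * _PAGE_ACCESSED without flushing the TLB.
 */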
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address, pmd_t *pmdp)
{
        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

int pudp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address, pud_t *pudp)
{
        return __pudp_test_and_clear_young(vma->vm_mm, address, pudp);
}

/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
        /*
         * Make sure the hardware valid bit is not set. We don't do
         * a tlb flush for this update.
         */

        WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
        assert_spin_locked(pmd_lockptr(mm, pmdp));
        WARN_ON(!(pmd_large(pmd)));
#endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

void set_pud_at(struct mm_struct *mm, unsigned long addr,
                pud_t *pudp, pud_t pud)
{
#ifdef CONFIG_DEBUG_VM
        /*
         * Make sure the hardware valid bit is not set. We don't do
         * a tlb flush for this update.
         */

        WARN_ON(pte_hw_valid(pud_pte(*pudp)));
        assert_spin_locked(pud_lockptr(mm, pudp));
        WARN_ON(!(pud_large(pud)));
#endif
        trace_hugepage_set_pud(addr, pud_val(pud));
        return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
}

static void do_serialize(void *arg)
{
        /* We've taken the IPI, so try to trim the mask while here */
        if (radix_enabled()) {
                struct mm_struct *mm = arg;
                exit_lazy_flush_tlb(mm, false);
        }
}

/*
 * Serialize against __find_linux_pte() which does lock-less
 * lookup in page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t we want to prevent transit from pmd pointing to page table
 * to pmd pointing to huge page (and back) while interrupts are disabled.
 * We clear pmd to possibly replace it with page table pointer in
 * different code paths. So make sure we wait for the parallel
 * __find_linux_pte() to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
        smp_mb();
        smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1);
}
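
/*
 * Illustrative call pattern (a sketch, not code from this file): a caller
 * about to change what a pmd points to (page table vs. huge page) typically
 * clears the entry, flushes, and only then repurposes the pmd:
 *
 *	pmd = *pmdp;
 *	pmd_clear(pmdp);
 *	flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
 *	serialize_against_pte_lookup(vma->vm_mm);
 */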

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                      pmd_t *pmdp)
{
        unsigned long old_pmd;

        old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return __pmd(old_pmd);
}

pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
                                   unsigned long addr, pmd_t *pmdp, int full)
{
        pmd_t pmd;
        VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
        VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
                   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
        /*
         * If it is not a fullmm flush, then we can possibly end up converting
         * this PMD pte entry to a regular level 0 PTE by a parallel page fault.
         * Make sure we flush the tlb in this case.
         */
        if (!full)
                flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
        return pmd;
}

pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
                                   unsigned long addr, pud_t *pudp, int full)
{
        pud_t pud;

        VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
        VM_BUG_ON((pud_present(*pudp) && !pud_devmap(*pudp)) ||
                  !pud_present(*pudp));
        pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp);
        /*
         * If it is not a fullmm flush, then we can possibly end up converting
         * this PUD pte entry to a regular level 0 PTE by a parallel page fault.
         * Make sure we flush the tlb in this case.
         */
        if (!full)
                flush_pud_tlb_range(vma, addr, addr + HPAGE_PUD_SIZE);
        return pud;
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
        return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

static pud_t pud_set_protbits(pud_t pud, pgprot_t pgprot)
{
        return __pud(pud_val(pud) | pgprot_val(pgprot));
}

/*
 * At some point we should be able to get rid of
 * pmd_mkhuge() and mk_huge_pmd() when we update all the
 * other archs to mark the pmd huge in pfn_pmd().
 */
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long pmdv;

        pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

        return __pmd_mkhuge(pmd_set_protbits(__pmd(pmdv), pgprot));
}

pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long pudv;

        pudv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

        return __pud_mkhuge(pud_set_protbits(__pud(pudv), pgprot));
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
        return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        unsigned long pmdv;

        pmdv = pmd_val(pmd);
        pmdv &= _HPAGE_CHG_MASK;
        return pmd_set_protbits(__pmd(pmdv), newprot);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec, called with MMU off */
notrace void mmu_cleanup_all(void)
{
        if (radix_enabled())
                radix__mmu_cleanup_all();
        else if (mmu_hash_ops.hpte_clear_all)
                mmu_hash_ops.hpte_clear_all();

        reset_sprs();
}
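
/*
 * Memory hotplug support: add or tear down linear-map entries for a newly
 * added or removed memory section, dispatching to the radix or hash
 * implementation depending on which MMU mode the kernel booted with.
 */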
#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end,
                                     int nid, pgprot_t prot)
{
        if (radix_enabled())
                return radix__create_section_mapping(start, end, nid, prot);

        return hash__create_section_mapping(start, end, nid, prot);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__remove_section_mapping(start, end);

        return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
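
/*
 * Allocate the partition table at boot. PATB_SIZE_SHIFT determines both the
 * size and the alignment of the table, and the low bits of the PTCR encode
 * that size as (PATB_SIZE_SHIFT - 12). The nest MMU is handed the same PTCR
 * value so it walks the same table.
 */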
void __init mmu_partition_table_init(void)
{
        unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
        unsigned long ptcr;

        /* Initialize the Partition Table with no entries */
        partition_tb = memblock_alloc(patb_size, patb_size);
        if (!partition_tb)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, patb_size, patb_size);

        ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
        set_ptcr_when_no_uv(ptcr);
        powernv_set_nmmu_ptcr(ptcr);
}
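
/*
 * Flush all translations cached for a given LPID. For a radix partition this
 * uses the radix__flush_all_lpid*() helpers; for hash, a tlbie invalidating
 * the partition's entries is issued directly.
 */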
static void flush_partition(unsigned int lpid, bool radix)
{
        if (radix) {
                radix__flush_all_lpid(lpid);
                radix__flush_all_lpid_guest(lpid);
        } else {
                asm volatile("ptesync" : : : "memory");
                asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                /* do we need fixup here? */
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
        }
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
                                   unsigned long dw1, bool flush)
{
        unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

        /*
         * When ultravisor is enabled, the partition table is stored in secure
         * memory and can only be accessed doing an ultravisor call. However, we
         * maintain a copy of the partition table in normal memory to allow Nest
         * MMU translations to occur (for normal VMs).
         *
         * Therefore, here we always update partition_tb, regardless of whether
         * we are running under an ultravisor or not.
         */
        partition_tb[lpid].patb0 = cpu_to_be64(dw0);
        partition_tb[lpid].patb1 = cpu_to_be64(dw1);

        /*
         * If ultravisor is enabled, we do an ultravisor call to register the
         * partition table entry (PATE), which also does a global flush of TLBs
         * and partition table caches for the lpid. Otherwise, just do the
         * flush. The type of flush (hash or radix) depends on what the previous
         * use of the partition ID was, not the new use.
         */
        if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
                uv_register_pate(lpid, dw0, dw1);
                pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
                        dw0, dw1);
        } else if (flush) {
                /*
                 * Boot does not need to flush, because MMU is off and each
                 * CPU does a tlbiel_all() before switching them on, which
                 * flushes everything.
                 */
                flush_partition(lpid, (old & PATB_HR));
        }
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
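
/*
 * PMD page tables are handed out as fragments of a full page: each page is
 * split into PMD_FRAG_NR pieces of PMD_FRAG_SIZE bytes, and the next unused
 * fragment is cached in mm->context.pmd_frag. get_pmd_from_cache() hands out
 * fragments from that cache under mm->page_table_lock.
 */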
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
        void *pmd_frag, *ret;

        if (PMD_FRAG_NR == 1)
                return NULL;

        spin_lock(&mm->page_table_lock);
        ret = mm->context.pmd_frag;
        if (ret) {
                pmd_frag = ret + PMD_FRAG_SIZE;
                /*
                 * If we have taken up all the fragments mark the page NULL
                 */
                if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
                        pmd_frag = NULL;
                mm->context.pmd_frag = pmd_frag;
        }
        spin_unlock(&mm->page_table_lock);
        return (pmd_t *)ret;
}
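
/*
 * Allocate a fresh page to back PMD fragments. The first fragment is
 * returned to the caller; unless another thread installed a fragment page
 * meanwhile, the remaining fragments are published in mm->context.pmd_frag
 * and the ptdesc refcount is raised to PMD_FRAG_NR.
 */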
static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
        void *ret = NULL;
        struct ptdesc *ptdesc;
        gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

        if (mm == &init_mm)
                gfp &= ~__GFP_ACCOUNT;
        ptdesc = pagetable_alloc(gfp, 0);
        if (!ptdesc)
                return NULL;
        if (!pagetable_pmd_ctor(ptdesc)) {
                pagetable_free(ptdesc);
                return NULL;
        }

        atomic_set(&ptdesc->pt_frag_refcount, 1);

        ret = ptdesc_address(ptdesc);
        /*
         * If we support only one fragment just return the
         * allocated page.
         */
        if (PMD_FRAG_NR == 1)
                return ret;

        spin_lock(&mm->page_table_lock);
        /*
         * If we find pmd_frag already set, we return
         * the allocated page with single fragment
         * count.
         */
        if (likely(!mm->context.pmd_frag)) {
                atomic_set(&ptdesc->pt_frag_refcount, PMD_FRAG_NR);
                mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);

        return (pmd_t *)ret;
}
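
/*
 * Allocate one PMD table for the given mm: try the per-mm fragment cache
 * first and fall back to allocating a new fragment page.
 */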
pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
        pmd_t *pmd;

        pmd = get_pmd_from_cache(mm);
        if (pmd)
                return pmd;

        return __alloc_for_pmdcache(mm);
}

void pmd_fragment_free(unsigned long *pmd)
{
        struct ptdesc *ptdesc = virt_to_ptdesc(pmd);

        if (pagetable_is_reserved(ptdesc))
                return free_reserved_ptdesc(ptdesc);

        BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
        if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
                pagetable_pmd_dtor(ptdesc);
                pagetable_free(ptdesc);
        }
}

static inline void pgtable_free(void *table, int index)
{
        switch (index) {
        case PTE_INDEX:
                pte_fragment_free(table, 0);
                break;
        case PMD_INDEX:
                pmd_fragment_free(table);
                break;
        case PUD_INDEX:
                __pud_free(table);
                break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
                /* 16M hugepd directory at pud level */
        case HTLB_16M_INDEX:
                BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
                kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
                break;
                /* 16G hugepd directory at the pgd level */
        case HTLB_16G_INDEX:
                BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
                kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
                break;
#endif
                /* We don't free pgd table via RCU callback */
        default:
                BUG();
        }
}
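
/*
 * The page-table level is smuggled into the low bits of the table pointer
 * passed to tlb_remove_table(): table addresses are aligned to well more
 * than MAX_PGTABLE_INDEX_SIZE bytes, so "pgf |= index" below can later be
 * undone in __tlb_remove_table() by masking with MAX_PGTABLE_INDEX_SIZE.
 */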
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
        unsigned long pgf = (unsigned long)table;

        BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
        pgf |= index;
        tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        return pgtable_free(table, index);
}

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
        /*
         * Hash maps the memory with one size, mmu_linear_psize,
         * so don't bother to print these on hash.
         */
        if (!radix_enabled())
                return;
        seq_printf(m, "DirectMap4k:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
        seq_printf(m, "DirectMap64k:   %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
        seq_printf(m, "DirectMap2M:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
        seq_printf(m, "DirectMap1G:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */
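
/*
 * Transactional PTE protection update: ptep_modify_prot_start() makes the
 * entry invalid to hardware (while keeping pte_present() true) so no
 * concurrent hardware update can race with the change, and
 * ptep_modify_prot_commit() installs the final PTE, using the radix-specific
 * commit path when the radix MMU is active.
 */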
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
                             pte_t *ptep)
{
        unsigned long pte_val;

        /*
         * Clear the _PAGE_PRESENT so that no hardware parallel update is
         * possible. Also keep the pte_present true so that we don't take
         * a wrong fault.
         */
        pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

        return __pte(pte_val);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
                             pte_t *ptep, pte_t old_pte, pte_t pte)
{
        if (radix_enabled())
                return radix__ptep_modify_prot_commit(vma, addr,
                                                      ptep, old_pte, pte);
        set_pte_at(vma->vm_mm, addr, ptep, pte);
}

/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information, stored at a PTRS_PER_PMD offset from the related pmd
 * location. Hence a pmd move requires deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page we need to withdraw
 * during a pmd move.
 *
 * With hash we use the deposited table always, anonymous mapping or not.
 * With radix we use the deposited table only for anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
                           struct spinlock *old_pmd_ptl,
                           struct vm_area_struct *vma)
{
        if (radix_enabled())
                return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

        return true;
}

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = true;
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should tlbie be used for management of CPU TLBs, for kernel and process
 * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
 * guest address spaces.
 */
bool tlbie_enabled __read_mostly = true;

static int __init setup_disable_tlbie(char *str)
{
        if (!radix_enabled()) {
                pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
                return 1;
        }

        tlbie_capable = false;
        tlbie_enabled = false;

        return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);

static int __init pgtable_debugfs_setup(void)
{
        if (!tlbie_capable)
                return 0;

        /*
         * There is no locking vs tlb flushing when changing this value.
         * The tlb flushers will see one value or another, and use either
         * tlbie or tlbiel with IPIs. In both cases the TLBs will be
         * invalidated as expected.
         */
        debugfs_create_bool("tlbie_enabled", 0600,
                            arch_debugfs_dir, &tlbie_enabled);

        return 0;
}
arch_initcall(pgtable_debugfs_setup);
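
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and the
 * arch directory is "powerpc"):
 *
 *	echo 0 > /sys/kernel/debug/powerpc/tlbie_enabled
 *
 * switches kernel/process TLB invalidation from broadcast tlbie to
 * tlbiel plus IPIs; writing 1 switches it back.
 */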

#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN)
/*
 * Override the generic version in mm/memremap.c.
 *
 * With hash translation, the direct-map range is mapped with just one
 * page size selected by htab_init_page_sizes(). Consult
 * mmu_psize_defs[] to determine the minimum page size alignment.
 */
unsigned long memremap_compat_align(void)
{
        if (!radix_enabled()) {
                unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;
                return max(SUBSECTION_SIZE, 1UL << shift);
        }

        return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif
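
/*
 * Build the vma page protection for a set of VM_* flags, adding bits the
 * generic protection_map cannot express: execute-only on radix, _PAGE_SAO
 * for VM_SAO mappings, and memory-protection-key bits when pkeys are
 * configured.
 */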
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
        unsigned long prot;

        /* Radix supports execute-only, but protection_map maps X -> RX */
        if (radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)) {
                prot = pgprot_val(PAGE_EXECONLY);
        } else {
                prot = pgprot_val(protection_map[vm_flags &
                                                 (VM_ACCESS_FLAGS | VM_SHARED)]);
        }

        if (vm_flags & VM_SAO)
                prot |= _PAGE_SAO;

#ifdef CONFIG_PPC_MEM_KEYS
        prot |= vmflag_to_pte_pkey_bits(vm_flags);
#endif

        return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);