Merge tag 'riscv-for-linus-6.5-mw2' of git://git.kernel.org/pub/scm/linux/kernel...
[sfrench/cifs-2.6.git] / arch / riscv / mm / init.c
index 4fa420faa780899b4802fc12eba567d03cd458e5..70fb31960b639feeef86d7115fc3df1b6d8d45d3 100644 (file)
@@ -267,7 +267,6 @@ static void __init setup_bootmem(void)
        dma_contiguous_reserve(dma32_phys_limit);
        if (IS_ENABLED(CONFIG_64BIT))
                hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
-       memblock_allow_resize();
 }
 
 #ifdef CONFIG_MMU
@@ -357,7 +356,7 @@ static phys_addr_t __init alloc_pte_late(uintptr_t va)
        unsigned long vaddr;
 
        vaddr = __get_free_page(GFP_KERNEL);
-       BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)));
+       BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page((void *)vaddr)));
 
        return __pa(vaddr);
 }
@@ -440,7 +439,7 @@ static phys_addr_t __init alloc_pmd_late(uintptr_t va)
        unsigned long vaddr;
 
        vaddr = __get_free_page(GFP_KERNEL);
-       BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page(vaddr)));
+       BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page((void *)vaddr)));
 
        return __pa(vaddr);
 }
@@ -1370,6 +1369,9 @@ void __init paging_init(void)
 {
        setup_bootmem();
        setup_vm_final();
+
+       /* Depends on the linear mapping being ready */
+       memblock_allow_resize();
 }
 
 void __init misc_mem_init(void)
@@ -1389,3 +1391,61 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
        return vmemmap_populate_basepages(start, end, node, NULL);
 }
 #endif
+
+#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
+/*
+ * Pre-allocates page-table pages for a specific area in the kernel
+ * page-table. Only the level which needs to be synchronized between
+ * all page-tables is allocated because the synchronization can be
+ * expensive.
+ */
+static void __init preallocate_pgd_pages_range(unsigned long start, unsigned long end,
+                                              const char *area)
+{
+       unsigned long addr;
+       const char *lvl;
+
+       /*
+        * Walk the range one PGD entry at a time.  The "addr >= start"
+        * test terminates the loop if ALIGN() wraps past the top of the
+        * address space.
+        */
+       for (addr = start; addr < end && addr >= start; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+               pgd_t *pgd = pgd_offset_k(addr);
+               p4d_t *p4d;
+               pud_t *pud;
+               pmd_t *pmd;
+
+               /*
+                * Allocate only the level directly below the PGD: that is
+                * the deepest level a PGD entry points to, and (per the
+                * header comment) the only one that must be populated now
+                * so it is shared by all page-tables.
+                */
+               lvl = "p4d";
+               p4d = p4d_alloc(&init_mm, pgd, addr);
+               if (!p4d)
+                       goto failed;
+
+               /* With 5-level paging the p4d level is enough. */
+               if (pgtable_l5_enabled)
+                       continue;
+
+               lvl = "pud";
+               pud = pud_alloc(&init_mm, p4d, addr);
+               if (!pud)
+                       goto failed;
+
+               /* With 4-level paging the pud level is enough. */
+               if (pgtable_l4_enabled)
+                       continue;
+
+               /* 3-level paging: PGD entries point at pmd tables. */
+               lvl = "pmd";
+               pmd = pmd_alloc(&init_mm, pud, addr);
+               if (!pmd)
+                       goto failed;
+       }
+       return;
+
+failed:
+       /*
+        * The pages have to be there now or they will be missing in
+        * process page-tables later.
+        */
+       panic("Failed to pre-allocate %s pages for %s area\n", lvl, area);
+}
+
+/*
+ * Pre-populate the top-level kernel page-table entries for the vmalloc
+ * and (when modules are enabled) the modules/BPF areas, so later
+ * allocations in those regions never need to add new PGD entries that
+ * would be missing from already-created process page-tables.
+ */
+void __init pgtable_cache_init(void)
+{
+       preallocate_pgd_pages_range(VMALLOC_START, VMALLOC_END, "vmalloc");
+       if (IS_ENABLED(CONFIG_MODULES))
+               preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
+}
+#endif