Merge tag 'riscv-for-linus-5.12-mw0' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index a8a2ffd9114aaa22c60661c7cd2d80023747cb2e..3fc18f469efbc6f1e3b05f8fbba71bc9345315fa 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -9,6 +9,19 @@
 #include <linux/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
+#include <asm/pgalloc.h>
+
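+/*
+ * Boot-time allocator for KASAN page tables and shadow memory: there is no
+ * way to recover from a failed allocation here, so panic with a diagnostic
+ * rather than return NULL.
+ */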
+static __init void *early_alloc(size_t size, int node)
+{
+       void *ptr = memblock_alloc_try_nid(size, size,
+               __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
+
+       if (!ptr)
+               panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
+                       __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
+
+       return ptr;
+}
 
 extern pgd_t early_pg_dir[PTRS_PER_PGD];
 asmlinkage void __init kasan_early_init(void)
@@ -47,40 +60,133 @@ asmlinkage void __init kasan_early_init(void)
        local_flush_tlb_all();
 }
 
-static void __init populate(void *start, void *end)
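+/*
+ * Populate the PTE level of the shadow mapping for [vaddr, end), then hook
+ * the PTE table into the given PMD entry.
+ */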
+static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
+{
+       phys_addr_t phys_addr;
+       pte_t *ptep, *base_pte;
+
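+       /* Allocate a new PTE table, or reuse the one the PMD already points at. */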
+       if (pmd_none(*pmd))
+               base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+       else
+               base_pte = (pte_t *)pmd_page_vaddr(*pmd);
+
+       ptep = base_pte + pte_index(vaddr);
+
+       do {
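+               /* Back any still-empty shadow PTE with a freshly allocated page. */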
+               if (pte_none(*ptep)) {
+                       phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+                       set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
+               }
+       } while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
+
+       set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
+}
+
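+/*
+ * Populate the PMD level for [vaddr, end): use PMD_SIZE hugepage mappings
+ * where alignment and size allow, and fall back to PTE granularity elsewhere.
+ */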
+static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
+{
+       phys_addr_t phys_addr;
+       pmd_t *pmdp, *base_pmd;
+       unsigned long next;
+
+       base_pmd = (pmd_t *)pgd_page_vaddr(*pgd);
+       if (base_pmd == lm_alias(kasan_early_shadow_pmd))
+               base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
+
+       pmdp = base_pmd + pmd_index(vaddr);
+
+       do {
+               next = pmd_addr_end(vaddr, end);
+
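+               /* Try a PMD_SIZE hugepage first, if alignment and size allow. */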
+               if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
+                       phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
+                       if (phys_addr) {
+                               set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
+                               continue;
+                       }
+               }
+
+               kasan_populate_pte(pmdp, vaddr, next);
+       } while (pmdp++, vaddr = next, vaddr != end);
+
+       /*
+        * Wait for the whole PMD table to be populated before setting the
+        * PGD entry that points to it: if the PGD entry were hooked up first,
+        * memblock could allocate a page at a physical address whose shadow
+        * is not populated yet, and we'd take a page fault.
+        */
+       set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
+}
+
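+/*
+ * Walk the kernel PGD for [vaddr, end): map PGDIR_SIZE hugepages where
+ * possible, and descend to the PMD level otherwise.
+ */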
+static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end)
+{
+       phys_addr_t phys_addr;
+       pgd_t *pgdp = pgd_offset_k(vaddr);
+       unsigned long next;
+
+       do {
+               next = pgd_addr_end(vaddr, end);
+
+               /*
+                * pgdp can't be none since kasan_early_init initialized all
+                * of the KASAN shadow region with kasan_early_shadow_pmd: if
+                * this is still the case, we can try to allocate a hugepage
+                * as a replacement.
+                */
+               if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
+                   IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
+                       phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
+                       if (phys_addr) {
+                               set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
+                               continue;
+                       }
+               }
+
+               kasan_populate_pmd(pgdp, vaddr, next);
+       } while (pgdp++, vaddr = next, vaddr != end);
+}
+
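+/*
+ * Map the shadow range [start, end) to real pages and initialize it to
+ * KASAN_SHADOW_INIT.
+ */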
+static void __init kasan_populate(void *start, void *end)
 {
-       unsigned long i, offset;
        unsigned long vaddr = (unsigned long)start & PAGE_MASK;
        unsigned long vend = PAGE_ALIGN((unsigned long)end);
-       unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
-       unsigned long n_ptes =
-           ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
-       unsigned long n_pmds =
-           ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;
-
-       pte_t *pte =
-           memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
-       pmd_t *pmd =
-           memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
-       pgd_t *pgd = pgd_offset_k(vaddr);
-
-       for (i = 0; i < n_pages; i++) {
-               phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
-               set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
-       }
-
-       for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
-               set_pmd(&pmd[i],
-                       pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
-                               __pgprot(_PAGE_TABLE)));
 
-       for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
-               set_pgd(&pgd[i],
-                       pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
-                               __pgprot(_PAGE_TABLE)));
+       kasan_populate_pgd(vaddr, vend);
 
        local_flush_tlb_all();
-       memset(start, 0, end - start);
+       memset(start, KASAN_SHADOW_INIT, end - start);
+}
+
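+/*
+ * "Shallow" populate: with CONFIG_KASAN_VMALLOC, only the intermediate page
+ * tables of the vmalloc shadow are allocated here; the shadow pages
+ * themselves are populated on demand by the core KASAN vmalloc support.
+ */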
+static void __init kasan_shallow_populate(void *start, void *end)
+{
+       unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+       unsigned long vend = PAGE_ALIGN((unsigned long)end);
+       unsigned long next;
+       void *p;
+       pgd_t *pgd_k = pgd_offset_k(vaddr);
+
+       do {
+               next = pgd_addr_end(vaddr, vend);
+
+               /*
+                * A PGD entry that still points at the early shadow PMD is
+                * replaced with a freshly allocated, zeroed page table so
+                * that vmalloc shadow can later be hooked in underneath it.
+                */
+               if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
+                       p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+                       set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
+               }
+       } while (pgd_k++, vaddr = next, vaddr != vend);
+
+       local_flush_tlb_all();
 }
 
 void __init kasan_init(void)
@@ -90,7 +196,15 @@ void __init kasan_init(void)
 
        kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
                                    (void *)kasan_mem_to_shadow((void *)
-                                                               VMALLOC_END));
+                                                               VMEMMAP_END));
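+       /*
+        * The vmalloc shadow only needs its page tables when KASAN_VMALLOC
+        * maps real shadow on demand; otherwise back the whole range with
+        * the early shadow page.
+        */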
+       if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+               kasan_shallow_populate(
+                       (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
+                       (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+       else
+               kasan_populate_early_shadow(
+                       (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
+                       (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
 
        for_each_mem_range(i, &_start, &_end) {
                void *start = (void *)__va(_start);
@@ -99,7 +213,7 @@ void __init kasan_init(void)
                if (start >= end)
                        break;
 
-               populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
+               kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
        };
 
        for (i = 0; i < PTRS_PER_PTE; i++)
@@ -108,6 +222,6 @@ void __init kasan_init(void)
                               __pgprot(_PAGE_PRESENT | _PAGE_READ |
                                        _PAGE_ACCESSED)));
 
-       memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+       memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
        init_task.kasan_depth = 0;
 }