arm64/mmu: ignore debug_pagealloc for kernel segments
author Ard Biesheuvel <ard.biesheuvel@linaro.org>
Thu, 9 Mar 2017 20:52:05 +0000 (21:52 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Thu, 23 Mar 2017 13:55:21 +0000 (13:55 +0000)
The debug_pagealloc facility manipulates kernel mappings in the linear
region at page granularity to detect out of bounds or use-after-free
accesses. Since the kernel segments are not allocated dynamically,
there is no point in taking the debug_pagealloc_enabled flag into
account for them, and we can use block mappings unconditionally.

Note that this applies equally to the linear alias of text/rodata:
we will never have dynamic allocations there given that the same
memory is statically in use by the kernel image.

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/mm/mmu.c

index bb9179084217bf34ab75fb8cf22a8b20cdb39bf4..ec23aec6433fb1f5f9240862a8a4e3bc3ad5013c 100644 (file)
@@ -328,8 +328,7 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
                return;
        }
 
-       __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
-                            NULL, debug_pagealloc_enabled());
+       __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, false);
 
        /* flush the TLBs after updating live kernel mappings */
        flush_tlb_kernel_range(virt, virt + size);
@@ -381,7 +380,7 @@ static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end
         */
        __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
                             kernel_end - kernel_start, PAGE_KERNEL,
-                            early_pgtable_alloc, debug_pagealloc_enabled());
+                            early_pgtable_alloc, false);
 }
 
 void __init mark_linear_text_alias_ro(void)
@@ -437,7 +436,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
        BUG_ON(!PAGE_ALIGNED(size));
 
        __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
-                            early_pgtable_alloc, debug_pagealloc_enabled());
+                            early_pgtable_alloc, false);
 
        vma->addr       = va_start;
        vma->phys_addr  = pa_start;