#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_MAX_ENTRIES];
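
/*
 * Populate real shadow memory for one physically mapped range by
 * allocating backing pages through vmemmap_populate().
 */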
static int __init map_range(struct range *range)
{
	unsigned long start, end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	/*
	 * end + 1 here is intentional. We check several shadow bytes in
	 * advance to slightly speed up the fast path. In some rare cases we
	 * could cross the boundary of the mapped shadow, so we just map some
	 * more here.
	 */
	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}
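
/*
 * Tear down the early (zero) shadow mappings covering [start, end) so
 * that the range can be repopulated with real shadow page tables.
 */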
static void __init clear_pgds(unsigned long start, unsigned long end)
{
	pgd_t *pgd;

	for (; start < end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With a folded p4d, pgd_clear() is a nop: use p4d_clear()
		 * instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}
}
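
/*
 * Point every top-level entry covering the shadow region at the shared
 * zero-page hierarchy, so all of shadow memory initially reads as zeroes.
 */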
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		switch (CONFIG_PGTABLE_LEVELS) {
		case 4:
			pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
					_KERNPG_TABLE);
			break;
		case 5:
			pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
					_KERNPG_TABLE);
			break;
		default:
			BUILD_BUG();
		}
		start += PGDIR_SIZE;
	}
}
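
/*
 * With inline instrumentation a wild shadow access shows up as a general
 * protection fault, so hook the die notifier to hint at the likely causes.
 */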
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif
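
/*
 * Called very early in boot: build the single zero shadow page-table
 * hierarchy and map it over the entire shadow region.
 */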
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);
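
	/*
	 * Install the zero shadow in both the boot-time and the final
	 * top-level page tables.
	 */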
	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}
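
/*
 * Replace the early zero shadow with the real layout: allocated shadow
 * for the direct mapping and kernel image, zero shadow everywhere else.
 */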
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif
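
	/*
	 * Run on a copy of the top-level table while the shadow entries
	 * in init_top_pgt are torn down and rebuilt below.
	 */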
	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));
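
	/* Allocate real shadow for every physically mapped region. */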
	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));
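
	/* Populate real shadow for the kernel image (_stext.._end). */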
	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);
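
	/* The shadow is fully populated: switch back to init_top_pgt. */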
	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, so it may
	 * contain some garbage. Now we can clear and write-protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();
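
	/*
	 * init_task.kasan_depth starts out as 1 to suppress reports during
	 * early boot; clearing it arms KASAN error reporting.
	 */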
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}