/* arch/x86/mm/kasan_init_64.c */
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_MAX_ENTRIES];

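/*
 * Populate real shadow memory for the shadow of one physically mapped
 * range of the direct mapping, using vmemmap_populate() for the backing
 * pages.
 */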
static int __init map_range(struct range *range)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
        end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

        /*
         * end + 1 here is intentional. We check several shadow bytes in
         * advance to slightly speed up the fast path. In some rare cases we
         * could cross the boundary of the mapped shadow, so we just map some
         * more here.
         */
        return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}

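/*
 * Remove the early shadow mapping: clear the top-level (or p4d, when the
 * p4d level is folded) entries covering [start, end) so the region can be
 * rebuilt with real page tables.
 */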
static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        pgd_t *pgd;

        for (; start < end; start += PGDIR_SIZE) {
                pgd = pgd_offset_k(start);
                /*
                 * With a folded p4d level, pgd_clear() is a no-op, so use
                 * p4d_clear() instead.
                 */
                if (CONFIG_PGTABLE_LEVELS < 5)
                        p4d_clear(p4d_offset(pgd, start));
                else
                        pgd_clear(pgd);
        }
}

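/*
 * Point every top-level entry covering the shadow region at the shared
 * kasan_zero_* tables, so that during early boot the whole shadow region
 * reads as zero.
 */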
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
        int i;
        unsigned long start = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;

        for (i = pgd_index(start); start < end; i++) {
                switch (CONFIG_PGTABLE_LEVELS) {
                case 4:
                        pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
                                        _KERNPG_TABLE);
                        break;
                case 5:
                        pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
                                        _KERNPG_TABLE);
                        break;
                default:
                        BUILD_BUG();
                }
                start += PGDIR_SIZE;
        }
}

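/*
 * With inline instrumentation a bogus shadow access (e.g. for a NULL or
 * userspace pointer) shows up as a general protection fault, so print a
 * hint on DIE_GPF to make the resulting oops easier to interpret.
 */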
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
                             unsigned long val,
                             void *data)
{
        if (val == DIE_GPF) {
                pr_emerg("CONFIG_KASAN_INLINE enabled\n");
                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
        .notifier_call = kasan_die_handler,
};
#endif

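/*
 * Called early in boot, before real shadow memory can be allocated: build
 * a shadow mapping in which every page table level points at shared zero
 * tables (ultimately at kasan_zero_page), and install it into both the
 * early and the final top-level page tables.
 */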
void __init kasan_early_init(void)
{
        int i;
        pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
        pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
        pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
        p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

        for (i = 0; i < PTRS_PER_PTE; i++)
                kasan_zero_pte[i] = __pte(pte_val);

        for (i = 0; i < PTRS_PER_PMD; i++)
                kasan_zero_pmd[i] = __pmd(pmd_val);

        for (i = 0; i < PTRS_PER_PUD; i++)
                kasan_zero_pud[i] = __pud(pud_val);

        for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
                kasan_zero_p4d[i] = __p4d(p4d_val);

        kasan_map_early_shadow(early_top_pgt);
        kasan_map_early_shadow(init_top_pgt);
}

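/*
 * Replace the early zero shadow with the final layout: real shadow pages
 * for the direct mapping and the kernel image, and the read-only zero
 * page for address ranges that never need to be tracked.
 */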
void __init kasan_init(void)
{
        int i;

#ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
#endif

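        /*
         * Switch CR3 to a scratch copy of the top-level table so the shadow
         * entries in init_top_pgt can be torn down and rebuilt safely.
         */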
        memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
        load_cr3(early_top_pgt);
        __flush_tlb_all();

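        /* Drop the early zero shadow mappings; they are rebuilt below. */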
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

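        /* The shadow of everything below the direct mapping is backed by the zero page. */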
        kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));

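        /* Allocate real shadow for every mapped physical memory range. */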
        for (i = 0; i < E820_MAX_ENTRIES; i++) {
                if (pfn_mapped[i].end == 0)
                        break;

                if (map_range(&pfn_mapped[i]))
                        panic("kasan: unable to allocate shadow!");
        }
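        /*
         * The hole between the end of the direct mapping and the kernel image
         * only needs the zero shadow.
         */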
        kasan_populate_zero_shadow(
                kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                kasan_mem_to_shadow((void *)__START_KERNEL_map));

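        /* Allocate real shadow for the kernel text/data mapping. */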
        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
                        (unsigned long)kasan_mem_to_shadow(_end),
                        NUMA_NO_NODE);

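        /*
         * The rest of the shadow region, from the shadow of MODULES_END up to
         * KASAN_SHADOW_END, is backed by the zero page as well.
         */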
        kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);

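        /* Switch back to the final page tables now that the shadow is ready. */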
        load_cr3(init_top_pgt);
        __flush_tlb_all();

        /*
         * kasan_zero_page has been used as early shadow memory, so it may
         * contain some garbage. Now we can clear it and write-protect it,
         * since after the TLB flush no one should write to it.
         */
        memset(kasan_zero_page, 0, PAGE_SIZE);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
                set_pte(&kasan_zero_pte[i], pte);
        }
        /* Flush TLBs again to make sure the write protection has been applied. */
        __flush_tlb_all();

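        /*
         * kasan_depth is non-zero during early boot to suppress reports until
         * the shadow is fully initialized; clearing it turns KASAN reporting
         * on.
         */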
        init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized\n");
}