x86/mm/kasan: don't use vmemmap_populate() to initialize shadow
arch/x86/mm/kasan_init_64.c
// SPDX-License-Identifier: GPL-2.0
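/*
 * KASAN shadow memory setup for x86-64: the shadow region is populated
 * by allocating pages and wiring up the page tables directly instead
 * of going through vmemmap_populate().
 */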
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

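/*
 * Allocate size bytes of size-aligned boot memory from memblock,
 * above MAX_DMA_ADDRESS, preferring node nid.  The _nopanic variant
 * may return NULL, so callers must check the result.
 */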
static __init void *early_alloc(size_t size, int nid)
{
        return memblock_virt_alloc_try_nid_nopanic(size, size,
                __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
}

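/*
 * Populate the shadow page tables for [addr, end) at PMD level.  When
 * the CPU has PSE and the range covers exactly one aligned PMD, back
 * it with a single 2M page; otherwise fall back to 4K PTEs, allocating
 * a fresh page for every unpopulated entry.
 */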
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
                                      unsigned long end, int nid)
{
        pte_t *pte;

        if (pmd_none(*pmd)) {
                void *p;

                if (boot_cpu_has(X86_FEATURE_PSE) &&
                    ((end - addr) == PMD_SIZE) &&
                    IS_ALIGNED(addr, PMD_SIZE)) {
                        p = early_alloc(PMD_SIZE, nid);
                        if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
                                return;
                        else if (p)
                                memblock_free(__pa(p), PMD_SIZE);
                }

                p = early_alloc(PAGE_SIZE, nid);
                pmd_populate_kernel(&init_mm, pmd, p);
        }

        pte = pte_offset_kernel(pmd, addr);
        do {
                pte_t entry;
                void *p;

                if (!pte_none(*pte))
                        continue;

                p = early_alloc(PAGE_SIZE, nid);
                entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
                set_pte_at(&init_mm, addr, pte, entry);
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

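/*
 * Same as above, one level up: map a whole, aligned PUD with a 1G page
 * when the CPU supports gbpages, otherwise descend into the PMDs.
 * PMDs already mapped by a huge page are skipped.
 */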
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
                                      unsigned long end, int nid)
{
        pmd_t *pmd;
        unsigned long next;

        if (pud_none(*pud)) {
                void *p;

                if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
                    ((end - addr) == PUD_SIZE) &&
                    IS_ALIGNED(addr, PUD_SIZE)) {
                        p = early_alloc(PUD_SIZE, nid);
                        if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
                                return;
                        else if (p)
                                memblock_free(__pa(p), PUD_SIZE);
                }

                p = early_alloc(PAGE_SIZE, nid);
                pud_populate(&init_mm, pud, p);
        }

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (!pmd_large(*pmd))
                        kasan_populate_pmd(pmd, addr, next, nid);
        } while (pmd++, addr = next, addr != end);
}

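/*
 * Allocate the PUD page for this entry if necessary, then populate
 * each PUD-sized chunk of [addr, end), skipping PUDs already mapped
 * by a huge page.
 */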
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
                                      unsigned long end, int nid)
{
        pud_t *pud;
        unsigned long next;

        if (p4d_none(*p4d)) {
                void *p = early_alloc(PAGE_SIZE, nid);

                p4d_populate(&init_mm, p4d, p);
        }

        pud = pud_offset(p4d, addr);
        do {
                next = pud_addr_end(addr, end);
                if (!pud_large(*pud))
                        kasan_populate_pud(pud, addr, next, nid);
        } while (pud++, addr = next, addr != end);
}

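/*
 * Top of the population walk: allocate the P4D page for this PGD entry
 * if necessary, then handle each P4D-sized chunk of [addr, end).
 */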
static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
                                      unsigned long end, int nid)
{
        void *p;
        p4d_t *p4d;
        unsigned long next;

        if (pgd_none(*pgd)) {
                p = early_alloc(PAGE_SIZE, nid);
                pgd_populate(&init_mm, pgd, p);
        }

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                kasan_populate_p4d(p4d, addr, next, nid);
        } while (p4d++, addr = next, addr != end);
}

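/*
 * Populate real (writable) shadow memory for [addr, end) with a plain
 * page table walk that allocates backing pages as it goes, page
 * aligning the bounds first.
 */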
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
                                         int nid)
{
        pgd_t *pgd;
        unsigned long next;

        addr = addr & PAGE_MASK;
        end = round_up(end, PAGE_SIZE);
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                kasan_populate_pgd(pgd, addr, next, nid);
        } while (pgd++, addr = next, addr != end);
}

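/*
 * Populate the shadow that covers the direct mapping of one chunk of
 * the physical memory map, [range->start, range->end).
 */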
static void __init map_range(struct range *range)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
        end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

        kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}

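/*
 * Unmap the early zero shadow in [start, end) so it can be rebuilt.
 * Whole PGD entries are cleared first; the last PGD entry is shared
 * with other kernel mappings, so only its P4Ds are cleared.
 */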
static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        pgd_t *pgd;
        /* See comment in kasan_init() */
        unsigned long pgd_end = end & PGDIR_MASK;

        for (; start < pgd_end; start += PGDIR_SIZE) {
                pgd = pgd_offset_k(start);
                /*
                 * With folded p4d, pgd_clear() is a no-op; use
                 * p4d_clear() instead.
                 */
                if (CONFIG_PGTABLE_LEVELS < 5)
                        p4d_clear(p4d_offset(pgd, start));
                else
                        pgd_clear(pgd);
        }

        pgd = pgd_offset_k(start);
        for (; start < end; start += P4D_SIZE)
                p4d_clear(p4d_offset(pgd, start));
}

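/*
 * p4d_offset() for use before the final page tables are live: with
 * 4-level paging the p4d is folded into the pgd, and with 5-level
 * paging the p4d table's physical address is translated by hand
 * through the kernel text mapping.
 */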
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
        unsigned long p4d;

        if (!IS_ENABLED(CONFIG_X86_5LEVEL))
                return (p4d_t *)pgd;

        p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
        p4d += __START_KERNEL_map - phys_base;
        return (p4d_t *)p4d + p4d_index(addr);
}

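/*
 * Point every empty p4d entry in [addr, end) at the zero shadow PUD
 * table, hooking a zero shadow P4D table into the pgd first when
 * needed.
 */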
static void __init kasan_early_p4d_populate(pgd_t *pgd,
                unsigned long addr,
                unsigned long end)
{
        pgd_t pgd_entry;
        p4d_t *p4d, p4d_entry;
        unsigned long next;

        if (pgd_none(*pgd)) {
                pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
                set_pgd(pgd, pgd_entry);
        }

        p4d = early_p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);

                if (!p4d_none(*p4d))
                        continue;

                p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
                set_p4d(p4d, p4d_entry);
        } while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

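/*
 * Map the entire shadow region to the early zero shadow pages in the
 * given top-level page table.
 */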
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
        /* See comment in kasan_init() */
        unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
        unsigned long end = KASAN_SHADOW_END;
        unsigned long next;

        pgd += pgd_index(addr);
        do {
                next = pgd_addr_end(addr, end);
                kasan_early_p4d_populate(pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
}

#ifdef CONFIG_KASAN_INLINE
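/*
 * With inline instrumentation a wild access (e.g. a NULL pointer
 * dereference) faults on its unpopulated shadow address instead of
 * producing a KASAN report, so print a hint when a GPF is reported.
 */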
static int kasan_die_handler(struct notifier_block *self,
                             unsigned long val,
                             void *data)
{
        if (val == DIE_GPF) {
                pr_emerg("CONFIG_KASAN_INLINE enabled\n");
                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
        .notifier_call = kasan_die_handler,
};
#endif

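/*
 * Build the early zero shadow: every entry of each zero shadow table
 * points at the zero table one level below, bottoming out at
 * kasan_zero_page.  The result is mapped into both the early and the
 * final top-level page tables.
 */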
void __init kasan_early_init(void)
{
        int i;
        pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
        pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
        pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
        p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

        for (i = 0; i < PTRS_PER_PTE; i++)
                kasan_zero_pte[i] = __pte(pte_val);

        for (i = 0; i < PTRS_PER_PMD; i++)
                kasan_zero_pmd[i] = __pmd(pmd_val);

        for (i = 0; i < PTRS_PER_PUD; i++)
                kasan_zero_pud[i] = __pud(pud_val);

        for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
                kasan_zero_p4d[i] = __p4d(p4d_val);

        kasan_map_early_shadow(early_top_pgt);
        kasan_map_early_shadow(init_top_pgt);
}

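/*
 * Switch from the early zero shadow to the real shadow layout: tear
 * down the early mappings, populate writable shadow for the direct
 * mapping and the kernel image, map everything else to the (now
 * read-only) zero shadow, and switch back to the final page tables.
 */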
void __init kasan_init(void)
{
        int i;

#ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
#endif

        memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

        /*
         * We use the same shadow offset for 4- and 5-level paging to
         * facilitate boot-time switching between paging modes.
         * As a result, in 5-level paging mode KASAN_SHADOW_START and
         * KASAN_SHADOW_END are not aligned to the PGD boundary.
         *
         * KASAN_SHADOW_START doesn't share a PGD with anything else.
         * We claim the whole PGD entry to make things easier.
         *
         * KASAN_SHADOW_END lands in the last PGD entry and collides
         * with a bunch of things: kernel code, modules, the EFI
         * mapping, etc.  We need to take extra steps to not overwrite
         * them.
         */
        if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
                void *ptr;

                ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
                memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
                set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
                                __pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
        }

        load_cr3(early_top_pgt);
        __flush_tlb_all();

        clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

        kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));

        for (i = 0; i < E820_MAX_ENTRIES; i++) {
                if (pfn_mapped[i].end == 0)
                        break;

                map_range(&pfn_mapped[i]);
        }

        kasan_populate_zero_shadow(
                kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                kasan_mem_to_shadow((void *)__START_KERNEL_map));

        kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
                              (unsigned long)kasan_mem_to_shadow(_end),
                              early_pfn_to_nid(__pa(_stext)));

        kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);

        load_cr3(init_top_pgt);
        __flush_tlb_all();

        /*
         * kasan_zero_page has been used as early shadow memory, so it
         * may contain garbage.  Now we can clear it and write-protect
         * it, since after the TLB flush no one should write to it.
         */
        memset(kasan_zero_page, 0, PAGE_SIZE);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
                set_pte(&kasan_zero_pte[i], pte);
        }
        /* Flush TLBs again to be sure the write protection has taken effect. */
        __flush_tlb_all();

        init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized\n");
}