arch/s390/mm/kasan_init.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>

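/*
 * Simple downward bump allocators used while the normal allocators are
 * not yet available: segment_pos/segment_low hand out 1 MB segments
 * (EDAT large-page backing for the shadow), pgalloc_pos/pgalloc_low
 * hand out 4 KB pages for page tables and shadow memory. Both carve
 * memory from the top of detected RAM; whatever ends up used is
 * reserved with memblock at the end of kasan_early_init().
 */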
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;

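/*
 * Translate an address into its KASAN shadow address
 * (every 8 bytes of memory are covered by 1 byte of shadow).
 */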
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

static void __init kasan_early_panic(const char *reason)
{
        sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
        sclp_early_printk(reason);
        disabled_wait(0);
}

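/* carve one 1 MB segment from the top of the segment allocation area */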
static void * __init kasan_early_alloc_segment(void)
{
        segment_pos -= _SEGMENT_SIZE;

        if (segment_pos < segment_low)
                kasan_early_panic("out of memory during initialisation\n");

        return (void *)segment_pos;
}

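/* carve 2^order pages from the top of the page allocation area */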
static void * __init kasan_early_alloc_pages(unsigned int order)
{
        pgalloc_pos -= (PAGE_SIZE << order);

        if (pgalloc_pos < pgalloc_low)
                kasan_early_panic("out of memory during initialisation\n");

        return (void *)pgalloc_pos;
}

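/*
 * Allocate a region/segment (crst) table and preset every entry to
 * the given invalid value.
 */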
static void * __init kasan_early_crst_alloc(unsigned long val)
{
        unsigned long *table;

        table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
        if (table)
                crst_table_init(table, val);
        return table;
}

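/*
 * A page table occupies only half a 4 KB page on s390, so two pte
 * tables are carved out of each allocated page: the unused half is
 * remembered in pte_leftover and handed out on the next call.
 */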
static pte_t * __init kasan_early_pte_alloc(void)
{
        static void *pte_leftover;
        pte_t *pte;

        BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

        if (!pte_leftover) {
                pte_leftover = kasan_early_alloc_pages(0);
                pte = pte_leftover + _PAGE_TABLE_SIZE;
        } else {
                pte = pte_leftover;
                pte_leftover = NULL;
        }
        memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
        return pte;
}

enum populate_mode {
        POPULATE_ONE2ONE,
        POPULATE_MAP,
        POPULATE_ZERO_SHADOW
};
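
/*
 * Walk the page tables from @address to @end and populate them
 * according to @mode:
 *   POPULATE_ONE2ONE     - identity map the range (virtual == physical)
 *   POPULATE_MAP         - back the range with freshly allocated,
 *                          zeroed shadow pages
 *   POPULATE_ZERO_SHADOW - map the range read-only to the shared kasan
 *                          zero page, reusing the kasan_zero_* tables
 *                          at the largest possible granularity
 * With EDAT available, 1:1 and MAP mappings use 1 MB segments where
 * possible; otherwise everything is mapped with 4 KB pages.
 */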
static void __init kasan_early_vmemmap_populate(unsigned long address,
                                                unsigned long end,
                                                enum populate_mode mode)
{
        unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
        pgd_t *pg_dir;
        p4d_t *p4_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;

        pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
        if (!has_nx)
                pgt_prot_zero &= ~_PAGE_NOEXEC;
        pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
        sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);

        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, PGDIR_SIZE) &&
                            end - address >= PGDIR_SIZE) {
                                pgd_populate(&init_mm, pg_dir, kasan_zero_p4d);
                                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                                continue;
                        }
                        p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
                        pgd_populate(&init_mm, pg_dir, p4_dir);
                }

                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, P4D_SIZE) &&
                            end - address >= P4D_SIZE) {
                                p4d_populate(&init_mm, p4_dir, kasan_zero_pud);
                                address = (address + P4D_SIZE) & P4D_MASK;
                                continue;
                        }
                        pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
                        p4d_populate(&init_mm, p4_dir, pu_dir);
                }

                pu_dir = pud_offset(p4_dir, address);
                if (pud_none(*pu_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, PUD_SIZE) &&
                            end - address >= PUD_SIZE) {
                                pud_populate(&init_mm, pu_dir, kasan_zero_pmd);
                                address = (address + PUD_SIZE) & PUD_MASK;
                                continue;
                        }
                        pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }

                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        if (mode == POPULATE_ZERO_SHADOW &&
                            IS_ALIGNED(address, PMD_SIZE) &&
                            end - address >= PMD_SIZE) {
                                pmd_populate(&init_mm, pm_dir, kasan_zero_pte);
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }
                        /* the first megabyte of 1:1 is mapped with 4k pages */
                        if (has_edat && address && end - address >= PMD_SIZE &&
                            mode != POPULATE_ZERO_SHADOW) {
                                void *page;

                                if (mode == POPULATE_ONE2ONE) {
                                        page = (void *)address;
                                } else {
                                        page = kasan_early_alloc_segment();
                                        memset(page, 0, _SEGMENT_SIZE);
                                }
                                pmd_val(*pm_dir) = __pa(page) | sgt_prot;
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }

                        pt_dir = kasan_early_pte_alloc();
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                } else if (pmd_large(*pm_dir)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                if (pte_none(*pt_dir)) {
                        void *page;

                        switch (mode) {
                        case POPULATE_ONE2ONE:
                                page = (void *)address;
                                pte_val(*pt_dir) = __pa(page) | pgt_prot;
                                break;
                        case POPULATE_MAP:
                                page = kasan_early_alloc_pages(0);
                                memset(page, 0, PAGE_SIZE);
                                pte_val(*pt_dir) = __pa(page) | pgt_prot;
                                break;
                        case POPULATE_ZERO_SHADOW:
                                page = kasan_zero_page;
                                pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
                                break;
                        }
                }
                address += PAGE_SIZE;
        }
}

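/*
 * Install @pgd as both kernel and user ASCE and load it into control
 * registers 1, 7 and 13, so that the primary, secondary and home
 * address spaces all translate through this table.
 */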
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
        unsigned long asce_bits;

        asce_bits = asce_type | _ASCE_TABLE_LENGTH;
        S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
        S390_lowcore.user_asce = S390_lowcore.kernel_asce;

        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
}

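/*
 * Turn on DAT (dynamic address translation) in the current PSW and
 * switch to the home address space.
 */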
static void __init kasan_enable_dat(void)
{
        psw_t psw;

        psw.mask = __extract_psw();
        psw_bits(psw).dat = 1;
        psw_bits(psw).as = PSW_BITS_AS_HOME;
        __load_psw_mask(psw.mask);
}

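/*
 * Query the installed facilities: EDAT1 (facility 8) allows 1 MB
 * segment mappings; facility 130 provides instruction-execution
 * protection (no-execute), which is used unless it was disabled on
 * the command line. Set the corresponding control register bits for
 * the facilities that are usable.
 */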
static void __init kasan_early_detect_facilities(void)
{
        __stfle(S390_lowcore.stfle_fac_list,
                ARRAY_SIZE(S390_lowcore.stfle_fac_list));
        if (test_facility(8)) {
                has_edat = true;
                __ctl_set_bit(0, 23);
        }
        if (!noexec_disabled && test_facility(130)) {
                has_nx = true;
                __ctl_set_bit(0, 20);
        }
}

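/*
 * Return the end address of the last detected memory block, i.e. the
 * amount of physical memory found by early memory detection.
 */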
static unsigned long __init get_mem_detect_end(void)
{
        unsigned long start;
        unsigned long end;

        if (mem_detect.count) {
                __get_mem_detect_block(mem_detect.count - 1, &start, &end);
                return end;
        }
        return 0;
}

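/*
 * Build the early KASAN environment: set up early_pg_dir with a 1:1
 * mapping of all detected memory plus the KASAN shadow (real pages
 * for memory that is tracked, the shared zero page for everything
 * else), switch to it, enable DAT and finally mark KASAN as active
 * by resetting init_task.kasan_depth.
 */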
void __init kasan_early_init(void)
{
        unsigned long untracked_mem_end;
        unsigned long shadow_alloc_size;
        unsigned long initrd_end;
        unsigned long asce_type;
        unsigned long memsize;
        unsigned long vmax;
        unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
        pte_t pte_z;
        pmd_t pmd_z = __pmd(__pa(kasan_zero_pte) | _SEGMENT_ENTRY);
        pud_t pud_z = __pud(__pa(kasan_zero_pmd) | _REGION3_ENTRY);
        p4d_t p4d_z = __p4d(__pa(kasan_zero_pud) | _REGION2_ENTRY);

        kasan_early_detect_facilities();
        if (!has_nx)
                pgt_prot &= ~_PAGE_NOEXEC;
        pte_z = __pte(__pa(kasan_zero_page) | pgt_prot);

        memsize = get_mem_detect_end();
        if (!memsize)
                kasan_early_panic("cannot detect physical memory size\n");
        /* respect mem= cmdline parameter */
        if (memory_end_set && memsize > memory_end)
                memsize = memory_end;
        memsize = min(memsize, KASAN_SHADOW_START);

        if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
                /* 4 level paging */
                BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
                BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
                crst_table_init((unsigned long *)early_pg_dir,
                                _REGION2_ENTRY_EMPTY);
                untracked_mem_end = vmax = _REGION1_SIZE;
                asce_type = _ASCE_TYPE_REGION2;
        } else {
                /* 3 level paging */
                BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
                BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
                crst_table_init((unsigned long *)early_pg_dir,
                                _REGION3_ENTRY_EMPTY);
                untracked_mem_end = vmax = _REGION2_SIZE;
                asce_type = _ASCE_TYPE_REGION3;
        }

        /* init kasan zero shadow */
        crst_table_init((unsigned long *)kasan_zero_p4d, p4d_val(p4d_z));
        crst_table_init((unsigned long *)kasan_zero_pud, pud_val(pud_z));
        crst_table_init((unsigned long *)kasan_zero_pmd, pmd_val(pmd_z));
        memset64((u64 *)kasan_zero_pte, pte_val(pte_z), PTRS_PER_PTE);

        shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
        pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
                initrd_end =
                    round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
                pgalloc_low = max(pgalloc_low, initrd_end);
        }

        if (pgalloc_low + shadow_alloc_size > memsize)
                kasan_early_panic("out of memory during initialisation\n");

        if (has_edat) {
                segment_pos = round_down(memsize, _SEGMENT_SIZE);
                segment_low = segment_pos - shadow_alloc_size;
                pgalloc_pos = segment_low;
        } else {
                pgalloc_pos = memsize;
        }
        init_mm.pgd = early_pg_dir;
        /*
         * Current memory layout:
         * +- 0 -------------+   +- shadow start -+
         * | 1:1 ram mapping |  /| 1/8 ram        |
         * +- end of ram ----+ / +----------------+
         * | ... gap ...     |/  |      kasan     |
         * +- shadow start --+   |      zero      |
         * | 1/8 addr space  |   |      page      |
         * +- shadow end    -+   |      mapping   |
         * | ... gap ...     |\  |    (untracked) |
         * +- modules vaddr -+ \ +----------------+
         * | 2Gb             |  \|      unmapped  | allocated per module
         * +-----------------+   +- shadow end ---+
         */
        /* populate kasan shadow (for identity mapping and zero page mapping) */
        kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
        if (IS_ENABLED(CONFIG_MODULES))
                untracked_mem_end = vmax - MODULES_LEN;
        kasan_early_vmemmap_populate(__sha(max_physmem_end),
                                     __sha(untracked_mem_end),
                                     POPULATE_ZERO_SHADOW);
        /* memory allocated for identity mapping structs will be freed later */
        pgalloc_freeable = pgalloc_pos;
        /* populate identity mapping */
        kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
        kasan_set_pgd(early_pg_dir, asce_type);
        kasan_enable_dat();
        /* enable kasan */
        init_task.kasan_depth = 0;
        memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
        sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow(pgd_t *pg_dir)
{
        /*
         * At this point we are still running on the early page tables
         * set up in early_pg_dir, while swapper_pg_dir has just been
         * initialized with the identity mapping. Carry over the shadow
         * memory region from early_pg_dir to swapper_pg_dir.
         */

        pgd_t *pg_dir_src;
        pgd_t *pg_dir_dst;
        p4d_t *p4_dir_src;
        p4d_t *p4_dir_dst;
        pud_t *pu_dir_src;
        pud_t *pu_dir_dst;

        pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
        pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
        p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
        p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
        if (!p4d_folded(*p4_dir_src)) {
                /* 4 level paging */
                memcpy(p4_dir_dst, p4_dir_src,
                       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
                return;
        }
        /* 3 level paging */
        pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
        pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
        memcpy(pu_dir_dst, pu_dir_src,
               (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
}

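/*
 * Free the page tables that were only needed for the early 1:1
 * mapping; they were allocated between pgalloc_freeable and the
 * final pgalloc_pos.
 */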
void __init kasan_free_early_identity(void)
{
        memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}