// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>

#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm-generic/sections.h>

static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

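/*
 * In the early stage a "none" entry is simply zero, while in the late stage
 * it is one that still points at the shared early shadow table one level
 * down; such entries must be replaced before the level below can be
 * populated with real shadow pages.
 */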
#ifdef __PAGETABLE_PUD_FOLDED
#define __p4d_none(early, p4d) (0)
#else
#define __p4d_none(early, p4d) (early ? (p4d_val(p4d) == 0) : \
(__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
#endif

#ifdef __PAGETABLE_PMD_FOLDED
#define __pud_none(early, pud) (0)
#else
#define __pud_none(early, pud) (early ? (pud_val(pud) == 0) : \
(__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd)))
#endif

#define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \
(__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))

#define __pte_none(early, pte) (early ? pte_none(pte) : \
((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))

bool kasan_early_stage = true;

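/*
 * Before the real shadow is set up, every access is redirected to the single
 * zero page kasan_early_shadow_page. Afterwards, the shadow address is
 * derived from the 64-bit address window the pointer belongs to (cached
 * XKPRANGE, uncached XKPRANGE or XKVRANGE), each window having its own
 * shadow offset.
 */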
void *kasan_mem_to_shadow(const void *addr)
{
	if (!kasan_arch_is_ready()) {
		return (void *)(kasan_early_shadow_page);
	} else {
		unsigned long maddr = (unsigned long)addr;
		unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
		unsigned long offset = 0;

		maddr &= XRANGE_SHADOW_MASK;
		switch (xrange) {
		case XKPRANGE_CC_SEG:
			offset = XKPRANGE_CC_SHADOW_OFFSET;
			break;
		case XKPRANGE_UC_SEG:
			offset = XKPRANGE_UC_SHADOW_OFFSET;
			break;
		case XKVRANGE_VC_SEG:
			offset = XKVRANGE_VC_SHADOW_OFFSET;
			break;
		default:
			WARN_ON(1);
			return NULL;
		}

		return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
	}
}

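/* Invert kasan_mem_to_shadow(): recover the original address from a shadow address. */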
const void *kasan_shadow_to_mem(const void *shadow_addr)
{
	unsigned long addr = (unsigned long)shadow_addr;

	if (unlikely(addr > KASAN_SHADOW_END) ||
	    unlikely(addr < KASAN_SHADOW_START)) {
		WARN_ON(1);
		return NULL;
	}

	if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
		return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
	else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
	else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
	else {
		WARN_ON(1);
		return NULL;
	}
}

/*
 * Allocate a zeroed page for the shadow memory page tables.
 */
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
			__func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

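/*
 * Walk one level of the shadow page table at the given address, installing
 * a new table first if the entry is still "none": in the early stage the
 * shared early shadow table is linked in; later a fresh page is allocated
 * and seeded with a copy of the early shadow table, so that already-mapped
 * entries keep pointing at the zero shadow.
 */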
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
{
	if (__pmd_none(early, READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));
		pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
	}

	return pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
{
	if (__pud_none(early, READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd));
		pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys));
	}

	return pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
{
	if (__p4d_none(early, READ_ONCE(*p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud));
		p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys));
	}

	return pud_offset(p4dp, addr);
}

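/*
 * The kasan_*_populate() walkers fill [addr, end) level by level. Note the
 * loop condition at the pte/pmd level: after stepping to the next entry,
 * the walk also stops early once it reaches an entry that is no longer
 * "none", i.e. one that already has a real shadow table or page installed.
 */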
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
					__pa_symbol(kasan_early_shadow_page) :
					kasan_alloc_zeroed_page(node);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end);
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = p4d_offset(pgdp, addr);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end);
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);

	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

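/*
 * The early shadow is installed from kasan_init() on LoongArch, so this only
 * asserts that the shadow region is PGDIR_SIZE-aligned, which clear_pgds()
 * below relies on.
 */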
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
}

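/* Plain pgd store; WRITE_ONCE() keeps the compiler from tearing the write. */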
static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
{
	WRITE_ONCE(*pgdp, pgdval);
}

static void __init clear_pgds(unsigned long start, unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from swapper_pg_dir.
	 * pgd_clear() can't be used here because it is a no-op on 2- and
	 * 3-level page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
}

void __init kasan_init(void)
{
	u64 i;
	phys_addr_t pa_start, pa_end;

	/*
	 * The PGD entries were populated with invalid_pmd_table or
	 * invalid_pud_table in pagetable_init(), depending on how many
	 * page table levels are in use. Those values are non-zero, so
	 * pgd_none() would be false and the populate calls below would
	 * never install new tables. The PGD entries covering the KASAN
	 * shadow region therefore have to be cleared first.
	 */
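	/*
	 * Run on a temporary copy of swapper_pg_dir while the entries of
	 * swapper_pg_dir itself are cleared and repopulated below, then
	 * switch back once the shadow is in place.
	 */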
	memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir));
	csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH);
	local_flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	/* Maps everything to a single page of zeroes */
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);

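	/* Map the vmalloc/KFENCE range's shadow to the early zero shadow page. */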
	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
				    kasan_mem_to_shadow((void *)KFENCE_AREA_END));

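	/* From now on kasan_mem_to_shadow() performs real address translation. */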
	kasan_early_stage = false;

	/* Populate the linear mapping */
	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)phys_to_virt(pa_start);
		void *end   = (void *)phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
			(unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
	}

	/* Populate modules mapping */
	kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
		(unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);

	/*
	 * KASAN may reuse the contents of kasan_early_shadow_pte directly,
	 * so make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO));

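	/*
	 * Re-zero the early shadow page (it may have been written while it
	 * backed the whole shadow) and switch back to swapper_pg_dir, which
	 * now contains the real shadow mappings.
	 */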
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
	local_flush_tlb_all();

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized.\n");
}