// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)
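/*
 * Note (explanatory, not upstream text): these flags are passed down
 * through __create_pgd_mapping(). A caller that must later be able to
 * unmap or change permissions at page granularity passes
 * NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS to force PTE-level mappings;
 * see map_mem() and arch_add_memory() below for this pattern.
 */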

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

u64 __section(.mmuoff.data.write) vabits_actual;
EXPORT_SYMBOL(vabits_actual);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static DEFINE_SPINLOCK(swapper_pgdir_lock);

void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}

static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	/*
	 * Changing the memory type between Normal and Normal-Tagged is safe
	 * since Tagged is considered a permission attribute from the
	 * mismatched attribute aliases perspective.
	 */
	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
		mask |= PTE_ATTRINDX_MASK;

	return ((old ^ new) & ~mask) == 0;
}
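/*
 * Worked example (illustrative, not upstream text): remapping PAGE_KERNEL
 * as PAGE_KERNEL_RO, as update_mapping_prot() does in mark_rodata_ro(),
 * only flips PTE_RDONLY/PTE_WRITE, both of which are in the mask above, so
 * pgattr_change_is_safe() returns true and no break-before-make sequence
 * is required. A transition that touches bits outside the mask, such as
 * the output address or shareability attributes, returns false and trips
 * the BUG_ON()s in the init_p*()/alloc_init_*() callers below.
 */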

static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
			unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}
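/*
 * Explanatory note (not upstream text): PUD-level blocks are only
 * attempted with the 4K granule (PAGE_SHIFT == 12), where a level 1 block
 * covers 1GB, and only when the virtual range and the physical address
 * are all 1GB-aligned, i.e. no bits below PUD_MASK are set.
 */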

static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	if (p4d_none(p4d)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		__p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
		p4d = READ_ONCE(*p4dp);
	}
	BUG_ON(p4d_bad(p4d));

	pudp = pud_set_fixmap_offset(p4dp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	unsigned long addr, end, next;
	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);

	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);

	/*
	 * Call the proper page table ctor in case we later need to call
	 * core mm functions like apply_to_page_range() on this
	 * pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if the pmd is
	 * folded, in which case pgtable_pmd_page_ctor() becomes a nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}
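/*
 * Illustrative (hypothetical) usage sketch, not upstream text: mapping a
 * 2MB physical region into a non-init mm so that it can later be unmapped
 * or re-permissioned one page at a time:
 *
 *	create_pgd_mapping(mm, phys, virt, SZ_2M, PAGE_KERNEL, true);
 *
 * Passing page_mappings_only=false would instead permit block and
 * contiguous mappings wherever the alignment allows.
 */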

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}

static void __init map_mem(pgd_t *pgdp)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	phys_addr_t start, end;
	int flags = 0;
	u64 i;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the read-only
	 * text and rodata sections of the kernel image, so temporarily
	 * mark them as NOMAP to skip them in the for-loop below.
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.end)
		memblock_mark_nomap(crashk_res.start,
				    resource_size(&crashk_res));
#endif

	/* map all the memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		/*
		 * The linear map must allow allocation tags reading/writing
		 * if MTE is present. Otherwise, it has the same attributes as
		 * PAGE_KERNEL.
		 */
		__map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags);
	}

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);

#ifdef CONFIG_KEXEC_CORE
	/*
	 * Use page-level mappings here so that we can shrink the region
	 * in page granularity and put back unused memory to buddy system
	 * through /sys/kernel/kexec_crash_size interface.
	 */
	if (crashk_res.end) {
		__map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
			       PAGE_KERNEL,
			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
		memblock_clear_nomap(crashk_res.start,
				     resource_size(&crashk_res));
	}
#endif
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr	= va_start;
	vma->phys_addr	= pa_start;
	vma->size	= size;
	vma->flags	= VM_MAP | vm_flags;
	vma->caller	= __builtin_return_address(0);

	vm_area_add_early(vma);
}

static int __init parse_rodata(char *arg)
{
	int ret = strtobool(arg, &rodata_enabled);
	if (!ret) {
		rodata_full = false;
		return 0;
	}

	/* permit 'full' in addition to boolean options */
	if (strcmp(arg, "full"))
		return -EINVAL;

	rodata_enabled = true;
	rodata_full = true;
	return 0;
}
early_param("rodata", parse_rodata);
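/*
 * Explanatory note (not upstream text): the handler above accepts the
 * usual boolean spellings understood by strtobool() plus "full":
 *
 *	rodata=off	- kernel text/rodata remain writable
 *	rodata=on	- kernel text/rodata are mapped read-only
 *	rodata=full	- as "on", and rodata_full additionally forces
 *			  page-granular linear map permissions (see the
 *			  rodata_full checks in map_mem() and
 *			  arch_add_memory())
 */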

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
			     prot, __pgd_pgtable_alloc, 0);

	/* Map both the text and data into the kernel page table */
	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern char __entry_tramp_data_start[];

		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
			     __pa_symbol(__entry_tramp_data_start),
			     PAGE_KERNEL_RO);
	}

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Open-coded check for BTI, only for use in determining the configuration
 * of early mappings before the cpufeature code has run.
 */
static bool arm64_early_this_cpu_has_bti(void)
{
	u64 pfr1;

	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		return false;

	pfr1 = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
	return cpuid_feature_extract_unsigned_field(pfr1,
						    ID_AA64PFR1_BT_SHIFT);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

	/*
	 * If we have a CPU that supports BTI and a kernel built for
	 * BTI then mark the kernel executable text as guarded pages
	 * now so we don't have to rewrite the page tables later.
	 */
	if (arm64_early_this_cpu_has_bti())
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		pgd_t *bm_pgdp;
		p4d_t *bm_p4dp;
		pud_t *bm_pudp;
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgdp);
}

void __init paging_init(void)
{
	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));

	map_kernel(pgdp);
	map_mem(pgdp);

	pgd_clear_fixmap();

	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
	init_mm.pgd = swapper_pg_dir;

	memblock_free(__pa_symbol(init_pg_dir),
		      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

	memblock_allow_resize();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;

	addr = arch_kasan_reset_tag(addr);
	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return 0;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return 0;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return 0;

	if (pud_sect(pud))
		return pfn_valid(pud_pfn(pud));

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return 0;

	if (pmd_sect(pmd))
		return pfn_valid(pmd_pfn(pmd));

	ptep = pte_offset_kernel(pmdp, addr);
	pte = READ_ONCE(*ptep);
	if (pte_none(pte))
		return 0;

	return pfn_valid(pte_pfn(pte));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_hotplug_page_range(struct page *page, size_t size,
				    struct vmem_altmap *altmap)
{
	if (altmap) {
		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
	} else {
		WARN_ON(PageReserved(page));
		free_pages((unsigned long)page_address(page), get_order(size));
	}
}

static void free_hotplug_pgtable_page(struct page *page)
{
	free_hotplug_page_range(page, PAGE_SIZE, NULL);
}

static bool pgtable_range_aligned(unsigned long start, unsigned long end,
				  unsigned long floor, unsigned long ceiling,
				  unsigned long mask)
{
	start &= mask;
	if (start < floor)
		return false;

	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return false;
	}

	if (end - 1 > ceiling - 1)
		return false;
	return true;
}
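/*
 * Worked example (illustrative, not upstream text): with mask = PMD_MASK,
 * 'start' is rounded down to the base of the address range covered by one
 * PTE table. The range only qualifies if that rounded-down start is still
 * at or above 'floor' and 'end - 1' does not pass 'ceiling - 1'; a ceiling
 * of zero means "no upper limit", which is why it is special-cased above.
 * The free_empty_*_table() functions below use this to avoid freeing a
 * table page whose coverage extends into an adjacent, still-live region.
 */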

static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	pte_t *ptep, pte;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);
		if (pte_none(pte))
			continue;

		WARN_ON(!pte_present(pte));
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		if (free_mapped)
			free_hotplug_page_range(pte_page(pte),
						PAGE_SIZE, altmap);
	} while (addr += PAGE_SIZE, addr < end);
}

static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd));
		if (pmd_sect(pmd)) {
			pmd_clear(pmdp);

			/*
			 * One TLBI should be sufficient here as the PMD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pmd_page(pmd),
							PMD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pmd_table(pmd));
		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pud_t *pudp, pud;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud));
		if (pud_sect(pud)) {
			pud_clear(pudp);

			/*
			 * One TLBI should be sufficient here as the PUD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pud_page(pud),
							PUD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pud_table(pud));
		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_range(unsigned long addr, unsigned long end,
				bool free_mapped, struct vmem_altmap *altmap)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	/*
	 * An altmap can only be used as backing memory for the vmemmap
	 * mapping. If the backing memory itself is not being freed, the
	 * altmap is irrelevant; warn about this inconsistency when it is
	 * encountered.
	 */
	WARN_ON(!free_mapped && altmap);

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pte_t *ptep, pte;
	unsigned long i, start = addr;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);

		/*
		 * This is just a sanity check here which verifies that
		 * pte clearing has been done by earlier unmap loops.
		 */
		WARN_ON(!pte_none(pte));
	} while (addr += PAGE_SIZE, addr < end);

	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
		return;

	/*
	 * Check whether we can free the pte page if the rest of the
	 * entries are empty. Overlaps with other regions have been
	 * handled by the floor/ceiling check.
	 */
	ptep = pte_offset_kernel(pmdp, 0UL);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		if (!pte_none(READ_ONCE(ptep[i])))
			return;
	}

	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(ptep));
}

static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pmd_t *pmdp, pmd;
	unsigned long i, next, start = addr;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 2)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
		return;

	/*
	 * Check whether we can free the pmd page if the rest of the
	 * entries are empty. Overlaps with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pmdp = pmd_offset(pudp, 0UL);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(READ_ONCE(pmdp[i])))
			return;
	}

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pmdp));
}

static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pud_t *pudp, pud;
	unsigned long i, next, start = addr;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 3)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
		return;

	/*
	 * Check whether we can free the pud page if the rest of the
	 * entries are empty. Overlaps with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pudp = pud_offset(p4dp, 0UL);
	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (!pud_none(READ_ONCE(pudp[i])))
			return;
	}

	p4d_clear(p4dp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pudp));
}

static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		free_empty_pud_table(p4dp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}

static void free_empty_tables(unsigned long addr, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, altmap);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	do {
		next = pmd_addr_end(addr, end);

		pgdp = vmemmap_pgd_populate(addr, node);
		if (!pgdp)
			return -ENOMEM;

		p4dp = vmemmap_p4d_populate(pgdp, addr, node);
		if (!p4dp)
			return -ENOMEM;

		pudp = vmemmap_pud_populate(p4dp, addr, node);
		if (!pudp)
			return -ENOMEM;

		pmdp = pmd_offset(pudp, addr);
		if (pmd_none(READ_ONCE(*pmdp))) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
			if (!p)
				return -ENOMEM;

			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmdp, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
#ifdef CONFIG_MEMORY_HOTPLUG
	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

	unmap_hotplug_range(start, end, true, altmap);
	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
#endif
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgdp = pgd_offset_k(addr);
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	BUG_ON(p4d_none(p4d) || p4d_bad(p4d));

	return pud_offset_kimg(p4dp, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pudp = fixmap_pud(addr);
	pud_t pud = READ_ONCE(*pudp);

	BUG_ON(pud_none(pud) || pud_bad(pud));

	return pmd_offset_kimg(pudp, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgdp;
	p4d_t *p4dp, p4d;
	pud_t *pudp;
	pmd_t *pmdp;
	unsigned long addr = FIXADDR_START;

	pgdp = pgd_offset_k(addr);
	p4dp = p4d_offset(pgdp, addr);
	p4d = READ_ONCE(*p4dp);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pudp = pud_offset_kimg(p4dp, addr);
	} else {
		if (p4d_none(p4d))
			__p4d_populate(p4dp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
		pudp = fixmap_pud(addr);
	}
	if (pud_none(READ_ONCE(*pudp)))
		__pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
	pmdp = fixmap_pmd(addr);
	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmdp %p != %p, %p\n",
			pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. We rely on MIN_FDT_ALIGN being at least 8
	 * bytes so that we can always access the magic and size fields of the
	 * FDT header after mapping the first chunk; double check here that
	 * this is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

	return dt_virt;
}
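/*
 * Worked example (illustrative, hypothetical addresses): with 4K pages and
 * section mappings, SWAPPER_BLOCK_SIZE is 2MB, so a DT at
 * dt_phys == 0x4a201000 yields offset == 0x1000; the 2MB block at
 * 0x4a200000 is mapped at dt_virt_base and the FDT header is then read at
 * dt_virt_base + 0x1000. If the FDT extends past that first block, the
 * second create_mapping_noalloc() call above grows the mapping to cover it.
 */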

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}

int __init arch_ioremap_pud_supported(void)
{
	/*
	 * Only 4k granule supports level 1 block mappings.
	 * SW table walks can't handle removal of intermediate entries.
	 */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int __init arch_ioremap_pmd_supported(void)
{
	/* See arch_ioremap_pud_supported() */
	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
				   pud_val(new_pud)))
		return 0;

	VM_BUG_ON(phys & ~PUD_MASK);
	set_pud(pudp, new_pud);
	return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
				   pmd_val(new_pmd)))
		return 0;

	VM_BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmdp, new_pmd);
	return 1;
}
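/*
 * Explanatory note (not upstream text): both helpers return 1 on success
 * and 0 when installing the block entry would require a break-before-make
 * sequence, i.e. when pgattr_change_is_safe() rejects the transition from
 * the live entry. Callers (e.g. the generic ioremap huge-mapping code)
 * are expected to fall back to smaller mappings when 0 is returned.
 */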

int pud_clear_huge(pud_t *pudp)
{
	if (!pud_sect(READ_ONCE(*pudp)))
		return 0;
	pud_clear(pudp);
	return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
	if (!pmd_sect(READ_ONCE(*pmdp)))
		return 0;
	pmd_clear(pmdp);
	return 1;
}

int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
	pte_t *table;
	pmd_t pmd;

	pmd = READ_ONCE(*pmdp);

	if (!pmd_table(pmd)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pte_offset_kernel(pmdp, addr);
	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(addr);
	pte_free_kernel(NULL, table);
	return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
	pmd_t *table;
	pmd_t *pmdp;
	pud_t pud;
	unsigned long next, end;

	pud = READ_ONCE(*pudp);

	if (!pud_table(pud)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pmd_offset(pudp, addr);
	pmdp = table;
	next = addr;
	end = addr + PUD_SIZE;
	do {
		pmd_free_pte_page(pmdp, next);
	} while (pmdp++, next += PMD_SIZE, next != end);

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(addr);
	pmd_free(NULL, table);
	return 1;
}

int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;	/* Don't attempt a block mapping */
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
{
	unsigned long end = start + size;

	WARN_ON(pgdir != init_mm.pgd);
	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));

	unmap_hotplug_range(start, end, false, NULL);
	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	int ret, flags = 0;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
			     size, params->pgprot, __pgd_pgtable_alloc,
			     flags);

	memblock_clear_nomap(start, size);

	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
			   params);
	if (ret)
		__remove_pgd_mapping(swapper_pg_dir,
				     __phys_to_virt(start), size);
	return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
}

/*
 * This memory hotplug notifier helps prevent boot memory from being
 * inadvertently removed by blocking the pfn range offlining process in
 * __offline_pages(). Hence it prevents both the offlining and the removal
 * of boot memory, which is initially always online. If and when boot
 * memory can be removed in the future, this notifier should be dropped
 * and free_hotplug_page_range() should handle any reserved pages
 * allocated during boot.
 */
static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
					   unsigned long action, void *data)
{
	struct mem_section *ms;
	struct memory_notify *arg = data;
	unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
	unsigned long pfn = arg->start_pfn;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;

	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);
		if (early_section(ms))
			return NOTIFY_BAD;
	}
	return NOTIFY_OK;
}

static struct notifier_block prevent_bootmem_remove_nb = {
	.notifier_call = prevent_bootmem_remove_notifier,
};

static int __init prevent_bootmem_remove_init(void)
{
	return register_memory_notifier(&prevent_bootmem_remove_nb);
}
device_initcall(prevent_bootmem_remove_init);
#endif