/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#define KERN_VIRT_SIZE		(UL(-1))
#else

#define ADDRESS_SPACE_END	(UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR	(ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR	PAGE_OFFSET
#endif
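
/*
 * Worked example for the 64-bit case: ADDRESS_SPACE_END is 2^64 - 1, so
 * KERNEL_LINK_ADDR = 0xffffffffffffffff - 0x80000000 + 1
 *                  = 0xffffffff80000000,
 * i.e. the kernel image is linked into the last 2GB of the address space.
 */
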
/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE	((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
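
/*
 * Worked example, assuming Sv39 (PTRS_PER_PGD == 512, PGDIR_SIZE == 1GB):
 * KERN_VIRT_SIZE = ((512 / 2) * 1GB) / 2 = 128GB of direct mapping.
 */
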
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	PAGE_OFFSET
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START	(BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(MODULES_END)
#else
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR	(KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR		(PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END		(PFN_ALIGN((unsigned long)&_start))
#endif

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#ifdef CONFIG_64BIT
#define VA_BITS		(pgtable_l5_enabled ? \
				57 : (pgtable_l4_enabled ? 48 : 39))
#else
#define VA_BITS		32
#endif

#define VMEMMAP_SHIFT \
	(VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	VMALLOC_START
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
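
/*
 * Worked example, assuming Sv39 with 4K pages and a 64-byte struct page
 * (i.e. STRUCT_PAGE_MAX_SHIFT == 6): VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32,
 * so VMEMMAP_SIZE = 4GB, one 64-byte struct page for each of the 2^26
 * pages in the 256GB kernel half of the address space.
 */
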
/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START)

#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define MAX_FDT_SIZE	PMD_SIZE
#define FIX_FDT_SIZE	(MAX_FDT_SIZE + SZ_2M)
#define FIXADDR_SIZE	(PMD_SIZE + FIX_FDT_SIZE)
#else
#define MAX_FDT_SIZE	PGDIR_SIZE
#define FIX_FDT_SIZE	MAX_FDT_SIZE
#define FIXADDR_SIZE	(PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
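
/*
 * Taken together, the macros above stack these regions directly below the
 * linear map, descending from PAGE_OFFSET: vmalloc, then vmemmap, then
 * PCI I/O, then the fixmap ending at FIXADDR_START.
 */
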
#endif

#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET		SZ_32M
#define XIP_OFFSET_MASK		(SZ_32M - 1)
#else
#define XIP_OFFSET		0
#endif

#ifndef __ASSEMBLY__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#define __page_val_to_pfn(_val)  (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
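
/*
 * The PFN field of a RISC-V PTE starts at bit 10 (bits 0-7 hold the
 * V/R/W/X/U/G/A/D flags and bits 8-9 are RSW), so a PTE mapping physical
 * address 0x80000000, for example, carries
 * (0x80000000 >> 12) << 10 = 0x20000000 in its PFN field.
 */
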
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({						\
	uintptr_t __a = (uintptr_t)(addr);				\
	(__a >= CONFIG_XIP_PHYS_ADDR &&					\
	 __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ?			\
	__a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
	__a;								\
	})
#else
#define XIP_FIXUP(addr)		(addr)
#endif /* CONFIG_XIP_KERNEL */
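
/*
 * Illustrative fixup, using hypothetical values CONFIG_XIP_PHYS_ADDR =
 * 0x20000000 and CONFIG_PHYS_RAM_BASE = 0x80000000: the address
 * 0x22000000, which falls inside the 2 * XIP_OFFSET window, remaps to
 * 0x22000000 - 0x20000000 + 0x80000000 - SZ_32M = 0x80000000.
 */
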
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
	pud_t *(*get_pud_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pud)(uintptr_t va);
	p4d_t *(*get_p4d_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __initdata;

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ		\
				 | _PAGE_WRITE		\
				 | _PAGE_PRESENT	\
				 | _PAGE_ACCESSED	\
				 | _PAGE_DIRTY		\
				 | _PAGE_GLOBAL)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

#define _PAGE_IOREMAP	((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO		__pgprot(_PAGE_IOREMAP)

extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_LEAF is needed too because:
	 * when splitting a THP, split_huge_page() will temporarily clear
	 * the present bit; in this situation, pmd_present() and
	 * pmd_trans_huge() still need to return true.
	 */
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf	pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT

static __always_inline bool has_svnapot(void)
{
	return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
}

static inline unsigned long pte_napot(pte_t pte)
{
	return pte_val(pte) & _PAGE_NAPOT;
}

static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
{
	int pos = order - 1 + _PAGE_PFN_SHIFT;
	unsigned long napot_bit = BIT(pos);
	unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);

	return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
}
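
/*
 * Sketch of the resulting Svnapot encoding, assuming 4K base pages: a 64K
 * NAPOT mapping has order == 4, so PFN bits [3:0] become 0b1000 and
 * _PAGE_NAPOT is set; pte_pfn() below recovers the base PFN by clearing
 * the lowest set bit with res & (res - 1UL).
 */
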
#else

static __always_inline bool has_svnapot(void) { return false; }

static inline unsigned long pte_napot(pte_t pte)
{
	return 0;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long res = __page_val_to_pfn(pte_val(pte));

	if (has_svnapot() && pte_napot(pte))
		res = res & (res - 1UL);

	return res;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	unsigned long newprot_val = pgprot_val(newprot);

	ALT_THEAD_PMA(newprot_val);

	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	while (nr--)
		local_flush_tlb_page(address + nr * PAGE_SIZE);
}
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp)
{
	pte_t *ptep = (pte_t *)pmdp;

	update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pteval, unsigned int nr)
{
	page_table_check_ptes_set(mm, ptep, pteval, nr);

	for (;;) {
		__set_pte_at(ptep, pteval);
		if (--nr == 0)
			break;
		ptep++;
		/* Advance the PFN to the next page of the contiguous range */
		pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
	}
}
#define set_ptes set_ptes

static inline void pte_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	__set_pte_at(ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		__set_pte_at(ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));

	page_table_check_pte_clear(mm, pte);

	return pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_IO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_MTMASK;
	prot |= _PAGE_NOCACHE;

	return __pgprot(prot);
}
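
/*
 * Usage sketch: with the Svpbmt extension, _PAGE_IO selects the
 * strongly-ordered I/O memory type (as ioremap() gets via _PAGE_IOREMAP
 * above) and _PAGE_NOCACHE selects the non-cacheable, idempotent NC type
 * (as ioremap_wc() gets via pgprot_writecombine()).
 */
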
/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd)	(__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud)	(__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

static inline unsigned long pud_pfn(pud_t pud)
{
	return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pte_write(pmd_pte(pmd));
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
	return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pte_pmd(pte_mkwrite(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, pmdp, pmd);
	return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, pudp, pud);
	return __set_pte_at((pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && pud_user(pud);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));

	page_table_check_pmd_clear(mm, pmd);

	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
					unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
	return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}

#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit       1 to 3:	_PAGE_LEAF (zero)
 *	bit            5:	_PAGE_PROT_NONE (zero)
 *	bit            6:	exclusive marker
 *	bits      7 to 11:	swap type
 *	bits 12 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	7
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
	  ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
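
/*
 * Worked example of the encoding above: __swp_entry(1, 2) yields
 * (1 << 7) | (2 << 12) = 0x2080. Bit 0 (_PAGE_PRESENT) stays clear, so
 * the resulting PTE is !pte_none() && !pte_present(), as a swap PTE
 * must be.
 */
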
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is:
 * -        0x9fc00000	(~2.5GB) for RV32.
 * -      0x4000000000	( 256GB) for RV64 using SV39 mmu
 * -    0x800000000000	( 128TB) for RV64 using SV48 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since the "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64bits, must have bits
 * 63–48 all equal to bit 47, or else a page-fault exception will occur."
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE_64	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MIN	(PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
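
/*
 * Worked example, assuming Sv39 (PGDIR_SIZE == 1GB, PTRS_PER_PGD == 512):
 * TASK_SIZE_64 = 1GB * 512 / 2 = 0x4000000000 (256GB), matching the
 * table above.
 */
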
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32	(_AC(0x80000000, UL) - PAGE_SIZE)
#define TASK_SIZE	(test_thread_flag(TIF_32BIT) ? \
			 TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE	TASK_SIZE_64
#endif

#else
#define TASK_SIZE	FIXADDR_START
#define TASK_SIZE_MIN	TASK_SIZE
#endif

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		0xffffffffUL
#define VMALLOC_START		0
#define VMALLOC_END		TASK_SIZE

#endif /* !CONFIG_MMU */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va	(*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa	(*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va	_dtb_early_va
#define dtb_early_pa	_dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */

extern u64 satp_mode;
extern bool pgtable_l4_enabled;

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */