mm: treewide: clarify pgtable_page_{ctor,dtor}() naming
[sfrench/cifs-2.6.git] arch/riscv/include/asm/pgalloc.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGALLOC_H
#define _ASM_RISCV_PGALLOC_H

#include <linux/mm.h>
#include <asm/tlb.h>

#include <asm-generic/pgalloc.h>        /* for pte_{alloc,free}_one */

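/*
 * pmd_populate_kernel() makes a PMD entry point at a kernel PTE table:
 * the table's page frame number goes into the PFN field and _PAGE_TABLE
 * marks the entry as a valid pointer to the next level.
 */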
static inline void pmd_populate_kernel(struct mm_struct *mm,
        pmd_t *pmd, pte_t *pte)
{
        unsigned long pfn = virt_to_pfn(pte);

        set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

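/*
 * pmd_populate() does the same for a user PTE table, which arrives as a
 * pgtable_t (a struct page here), so page_address() is used to get its
 * virtual address before computing the PFN.
 */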
static inline void pmd_populate(struct mm_struct *mm,
        pmd_t *pmd, pgtable_t pte)
{
        unsigned long pfn = virt_to_pfn(page_address(pte));

        set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}

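/*
 * The PMD level only exists when it is not folded away (i.e. on 64-bit
 * configurations); pud_populate() then installs a PMD table one level
 * up, using the same PFN-plus-_PAGE_TABLE encoding as above.
 */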
#ifndef __PAGETABLE_PMD_FOLDED
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        unsigned long pfn = virt_to_pfn(pmd);

        set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}
#endif /* __PAGETABLE_PMD_FOLDED */

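/* pmd_pgtable() returns the struct page of the PTE table a PMD entry refers to. */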
#define pmd_pgtable(pmd)        pmd_page(pmd)

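/*
 * pgd_alloc() grabs one page for a new address space's page global
 * directory: the user slots (0 .. USER_PTRS_PER_PGD - 1) start out
 * zeroed, while the kernel slots are copied from init_mm.pgd so every
 * process shares the same kernel mappings.
 */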
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;

        pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
        if (likely(pgd != NULL)) {
                memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
                /* Copy kernel mappings */
                memcpy(pgd + USER_PTRS_PER_PGD,
                        init_mm.pgd + USER_PTRS_PER_PGD,
                        (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return pgd;
}

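/* pgd_free() releases the single page backing the PGD. */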
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}

#ifndef __PAGETABLE_PMD_FOLDED

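/*
 * pmd_alloc_one() hands back one zeroed page for a new PMD table;
 * __GFP_RETRY_MAYFAIL lets the request retry hard but ultimately fail
 * instead of triggering the OOM killer. The addr argument is unused.
 */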
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return (pmd_t *)__get_free_page(
                GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_ZERO);
}

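/* pmd_free() returns the PMD table's page to the page allocator. */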
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        free_page((unsigned long)pmd);
}

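/*
 * __pmd_free_tlb() is invoked from the mmu_gather teardown path; unlike
 * the PTE case below there is no page-table destructor to run, so it
 * simply frees the page.
 */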
#define __pmd_free_tlb(tlb, pmd, addr)  pmd_free((tlb)->mm, pmd)

#endif /* __PAGETABLE_PMD_FOLDED */

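/*
 * __pte_free_tlb() tears down a PTE table page: pgtable_pte_page_dtor()
 * (renamed from pgtable_page_dtor(), per the commit subject above) undoes
 * the constructor's per-page state before tlb_remove_page() queues the
 * page on the mmu_gather, so it is only freed after the TLB flush.
 */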
#define __pte_free_tlb(tlb, pte, buf)   \
do {                                    \
        pgtable_pte_page_dtor(pte);     \
        tlb_remove_page((tlb), pte);    \
} while (0)

#endif /* _ASM_RISCV_PGALLOC_H */