#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct mm_struct *mm,
				      struct vm_area_struct *vma,
				      unsigned long address, pmd_t *pmd,
				      unsigned int flags);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmd,
				  pmd_t orig_pmd, int dirty);
extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma,
			 struct vm_area_struct *new_vma,
			 unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			pfn_t pfn, bool write);
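
/*
 * Illustrative sketch only (not part of this header): a DAX-style huge
 * fault handler might install a PMD-sized device mapping like this.
 * my_huge_fault() and my_lookup_pfn() are hypothetical names;
 * vmf_insert_pfn_pmd() above is the real entry point.
 *
 *	int my_huge_fault(struct vm_area_struct *vma, unsigned long addr,
 *			  pmd_t *pmd, unsigned int flags)
 *	{
 *		pfn_t pfn = my_lookup_pfn(vma, addr);
 *
 *		return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
 *					  flags & FAULT_FLAG_WRITE);
 *	}
 */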
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};
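
/*
 * Each enumerator is a bit number in transparent_hugepage_flags, toggled
 * via sysfs.  A sketch of the test that the helper macros further down
 * expand to:
 *
 *	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
 *		... THP is enabled system-wide ("always") ...
 */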

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);

#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
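
/*
 * Worked example, assuming typical x86-64 values (PAGE_SHIFT == 12,
 * PMD_SHIFT == 21): HPAGE_PMD_SIZE = 1UL << 21 = 2 MiB,
 * HPAGE_PMD_ORDER = 21 - 12 = 9, and HPAGE_PMD_NR = 1 << 9 = 512 base
 * pages per PMD-mapped huge page.
 */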

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_defrag(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
	 (transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG) &&		\
	  (__vma)->vm_flags & VM_HUGEPAGE))
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */
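
/*
 * Usage sketch (a simplified mirror of what the fault path does): check
 * the vma before attempting a huge allocation, and let the defrag policy
 * decide how hard to try for the huge page.
 *
 *	if (transparent_hugepage_enabled(vma)) {
 *		gfp_t gfp = transparent_hugepage_defrag(vma) ?
 *			GFP_TRANSHUGE :
 *			GFP_TRANSHUGE & ~__GFP_DIRECT_RECLAIM;
 *		... try to allocate and map a huge page ...
 *	}
 */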

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);
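
/*
 * Sketch of the calling convention (simplified): the caller holds a
 * reference on the locked page; a non-zero return means the page could
 * not be split (e.g. it has extra pins) and is still a huge page.
 *
 *	lock_page(page);
 *	if (split_huge_page(page))
 *		... failed, handle the page as a whole THP ...
 *	unlock_page(page);
 */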

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address);	\
	} while (0)
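
/*
 * The macro checks the entry itself, so a caller that needs pte-level
 * granularity can invoke it unconditionally (sketch):
 *
 *	split_huge_pmd(vma, pmd, addr);	// no-op unless *pmd is huge/devmap
 *	... continue with normal pte handling ...
 */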

#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl);
/* mmap_sem must be held on entry */
static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma, ptl);
	else
		return false;
}
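
/*
 * Usage sketch, with mmap_sem held as required above: take the page-table
 * lock only if the entry really is huge (or devmap), otherwise fall back.
 *
 *	spinlock_t *ptl;
 *
 *	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
 *		... operate on the stable huge pmd ...
 *		spin_unlock(ptl);
 *	} else {
 *		... pte-level fallback ...
 *	}
 */
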
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}
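
/*
 * Accounting sketch: statistics that count base pages scale by this
 * value, e.g.
 *
 *	nr_taken += hpage_nr_pages(page);	// 512 for a THP on x86-64
 */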

extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pmd_t pmd, pmd_t *pmdp);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);
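
/*
 * Sketch: a read fault on untouched anonymous memory may map the shared
 * huge zero page; page-table walkers typically special-case it, e.g.
 *
 *	if (is_huge_zero_pmd(*pmd))
 *		... nothing to copy or account for this range ...
 */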

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
	do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl)
{
	return false;
}

static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */