mm/page_vma_mapped.c
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

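/*
 * not_found() ends the walk: page_vma_mapped_walk_done() drops the pte
 * mapping and page table lock still held by @pvmw, and the false return
 * lets callers simply write "return not_found(pvmw);".
 */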
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}

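/*
 * map_pte() maps the pte for pvmw->address and takes its page table lock.
 * Unless the caller asked for a synchronous walk (PVMW_SYNC), entries that
 * cannot match are rejected before locking: a migration walk needs a swap
 * pte, any other walk needs a present pte.
 */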
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
        pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
        if (!(pvmw->flags & PVMW_SYNC)) {
                if (pvmw->flags & PVMW_MIGRATION) {
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                } else {
                        if (!pte_present(*pvmw->pte))
                                return false;
                }
        }
        pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
        spin_lock(pvmw->ptl);
        return true;
}

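/*
 * check_pte() decides whether the pte pvmw->pte points at actually maps
 * pvmw->page: either a migration entry for one of the page's subpages
 * (PVMW_MIGRATION), a device-private swap entry for the page, or a present
 * pte whose target lies within the compound page.
 */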
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        if (pvmw->flags & PVMW_MIGRATION) {
#ifdef CONFIG_MIGRATION
                swp_entry_t entry;
                if (!is_swap_pte(*pvmw->pte))
                        return false;
                entry = pte_to_swp_entry(*pvmw->pte);

                if (!is_migration_entry(entry))
                        return false;
                if (migration_entry_to_page(entry) - pvmw->page >=
                                hpage_nr_pages(pvmw->page)) {
                        return false;
                }
                if (migration_entry_to_page(entry) < pvmw->page)
                        return false;
#else
                WARN_ON_ONCE(1);
#endif
        } else {
                if (is_swap_pte(*pvmw->pte)) {
                        swp_entry_t entry;

                        entry = pte_to_swp_entry(*pvmw->pte);
                        if (is_device_private_entry(entry) &&
                            device_private_entry_to_page(entry) == pvmw->page)
                                return true;
                }

                if (!pte_present(*pvmw->pte))
                        return false;

                /* THP can be referenced by any subpage */
                if (pte_page(*pvmw->pte) - pvmw->page >=
                                hpage_nr_pages(pvmw->page)) {
                        return false;
                }
                if (pte_page(*pvmw->pte) < pvmw->page)
                        return false;
        }

        return true;
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct mm_struct *mm = pvmw->vma->vm_mm;
        struct page *page = pvmw->page;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t pmde;

        /* The only possible pmd mapping has been handled on the last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (pvmw->pte)
                goto next_pte;

        if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
                pvmw->pte = huge_pte_offset(mm, pvmw->address,
                                            PAGE_SIZE << compound_order(page));
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }
restart:
        pgd = pgd_offset(mm, pvmw->address);
        if (!pgd_present(*pgd))
                return false;
        p4d = p4d_offset(pgd, pvmw->address);
        if (!p4d_present(*p4d))
                return false;
        pud = pud_offset(p4d, pvmw->address);
        if (!pud_present(*pud))
                return false;
        pvmw->pmd = pmd_offset(pud, pvmw->address);
        /*
         * Make sure the pmd value isn't cached in a register by the
         * compiler and used as a stale value after we've observed a
         * subsequent update.
         */
        pmde = READ_ONCE(*pvmw->pmd);
        if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
                pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                if (likely(pmd_trans_huge(*pvmw->pmd))) {
                        if (pvmw->flags & PVMW_MIGRATION)
                                return not_found(pvmw);
                        if (pmd_page(*pvmw->pmd) != page)
                                return not_found(pvmw);
                        return true;
                } else if (!pmd_present(*pvmw->pmd)) {
                        if (thp_migration_supported()) {
                                if (!(pvmw->flags & PVMW_MIGRATION))
                                        return not_found(pvmw);
                                if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
                                        swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

                                        if (migration_entry_to_page(entry) != page)
                                                return not_found(pvmw);
                                        return true;
                                }
                        }
                        return not_found(pvmw);
                } else {
                        /* THP pmd was split under us: handle on the pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                }
        } else if (!pmd_present(pmde)) {
                return false;
        }
        if (!map_pte(pvmw))
                goto next_pte;
        while (1) {
                if (check_pte(pvmw))
                        return true;
next_pte:
                /* Seeking to the next pte only makes sense for THP */
                if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
                        return not_found(pvmw);
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= pvmw->vma->vm_end ||
                            pvmw->address >=
                                        __vma_address(pvmw->page, pvmw->vma) +
                                        hpage_nr_pages(pvmw->page) * PAGE_SIZE)
                                return not_found(pvmw);
                        /* Did we cross a page table boundary? */
                        if (pvmw->address % PMD_SIZE == 0) {
                                pte_unmap(pvmw->pte);
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                goto restart;
                        } else {
                                pvmw->pte++;
                        }
                } while (pte_none(*pvmw->pte));

                if (!pvmw->ptl) {
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
        }
}
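
/*
 * Illustrative sketch only, not part of this file: a typical caller drives
 * page_vma_mapped_walk() in a loop, as rmap walkers do, so that every pte
 * of a PTE-mapped THP is visited. The function below and its per-mapping
 * work are hypothetical, and the block is compiled out via #if 0.
 */
#if 0
static void example_visit_mappings(struct page *page,
                                   struct vm_area_struct *vma,
                                   unsigned long address)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = address,
        };

        while (page_vma_mapped_walk(&pvmw)) {
                /*
                 * One mapping per iteration: pvmw.pte (or pvmw.pmd for a
                 * PMD-mapped THP) is valid here and pvmw.ptl is held.
                 */
        }
}
#endif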

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .flags = PVMW_SYNC,
        };
        unsigned long start, end;

        start = __vma_address(page, vma);
        end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

        if (unlikely(end < vma->vm_start || start >= vma->vm_end))
                return 0;
        pvmw.address = max(start, vma->vm_start);
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}
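
/*
 * Illustrative sketch only, not part of this file: a hypothetical assertion
 * built on page_mapped_in_vma(). Because the walk uses PVMW_SYNC, map_pte()
 * always takes the pte lock rather than skipping unsuitable entries early.
 */
#if 0
static void example_assert_mapped(struct page *page, struct vm_area_struct *vma)
{
        /* Hypothetical debug check; fires if @page has no mapping in @vma. */
        VM_BUG_ON_PAGE(!page_mapped_in_vma(page, vma), page);
}
#endif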