arch/sparc/mm/gup.c
/*
 * Lockless get_user_pages_fast for sparc, cribbed from powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance-critical leaf functions are made noinline; otherwise
 * gcc inlines everything into a single function, which results in too
 * much register pressure.
 */
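/*
 * For illustration (not part of the original file): in the kernel,
 * `noinline` is a macro for the compiler attribute, roughly as below.
 * A minimal sketch, assuming a GCC-compatible toolchain:
 *
 *	#define noinline __attribute__((__noinline__))
 *
 * Marking a hot leaf this way gives it its own stack frame and
 * register allocation instead of inflating the caller's.
 */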
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep;

	if (tlb_type == hypervisor) {
		result = _PAGE_PRESENT_4V|_PAGE_P_4V;
		if (write)
			result |= _PAGE_WRITE_4V;
	} else {
		result = _PAGE_PRESENT_4U|_PAGE_P_4U;
		if (write)
			result |= _PAGE_WRITE_4U;
	}
	mask = result | _PAGE_SPECIAL;

	ptep = pte_offset_kernel(&pmd, addr);
	do {
		struct page *page, *head;
		pte_t pte = *ptep;

		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		/* The hugepage case is simplified on sparc64 because
		 * we encode the sub-page pfn offsets into the
		 * hugepage PTEs.  We could optimize this in the future
		 * to use page_cache_add_speculative() for the hugepage
		 * case.
		 */
		page = pte_page(pte);
		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}

		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}
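
/*
 * A minimal sketch (not part of the original file) of the speculative
 * reference pattern used above.  The assumption here is that
 * page_cache_get_speculative() boils down to a "take a reference only
 * if the count is non-zero" operation, i.e. get_page_unless_zero():
 *
 *	pte_t pte = *ptep;				// snapshot the PTE
 *	struct page *head = compound_head(pte_page(pte));
 *
 *	if (!get_page_unless_zero(head))		// page may be mid-free
 *		return 0;
 *	if (pte_val(pte) != pte_val(*ptep)) {		// PTE changed under us?
 *		put_page(head);				// lost the race
 *		return 0;				// fall back to slow path
 *	}
 *	// The reference now pins the page; any concurrent unmap/free
 *	// must happen entirely before or after our snapshot.
 */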

static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
			unsigned long end, int write, struct page **pages,
			int *nr)
{
	struct page *head, *page;
	int refs;

	if (!(pmd_val(pmd) & _PAGE_VALID))
		return 0;

	if (write && !pmd_write(pmd))
		return 0;

	refs = 0;
	page = pmd_page(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	head = compound_head(page);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}
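
/*
 * Worked example of the sub-page index above (illustrative; assumes
 * the common sparc64 layout of 8 KB base pages, PAGE_SHIFT = 13, with
 * an 8 MB PMD span): for an addr 0x30000 bytes into the huge page,
 *
 *	(addr & ~PMD_MASK) >> PAGE_SHIFT  ==  0x30000 >> 13  ==  24
 *
 * so the walk starts at the 24th sub-page of the compound page and
 * records one struct page per 8 KB step until `end`.
 */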

static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
			unsigned long end, int write, struct page **pages,
			int *nr)
{
	struct page *head, *page;
	int refs;

	if (!(pud_val(pud) & _PAGE_VALID))
		return 0;

	if (write && !pud_write(pud))
		return 0;

	refs = 0;
	page = pud_page(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	head = compound_head(page);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(pud) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_large(pmd))) {
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write,
					  pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}
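
/*
 * For reference, a sketch of the walker's boundary arithmetic: the
 * generic pmd_addr_end() used above clamps each step to the smaller
 * of the next PMD boundary and the overall end, roughly
 *
 *	next = (addr + PMD_SIZE) & PMD_MASK;
 *	return (next - 1 < end - 1) ? next : end;
 *
 * (the "- 1" form keeps an `end` of 0, i.e. address-space wrap, from
 * confusing the comparison), so each leaf call handles at most one
 * PMD's worth of the requested range.
 */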

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_large(pud))) {
			if (!gup_huge_pud(pudp, pud, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}
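
/*
 * A hypothetical caller sketch (not from this file): unlike
 * get_user_pages_fast() below, __get_user_pages_fast() never falls
 * back to the slow path, so it may pin fewer pages than requested
 * and the caller must cope with a partial result:
 *
 *	int i, got;
 *
 *	got = __get_user_pages_fast(uaddr, nr_pages, 1, pages);
 *	if (got < nr_pages) {
 *		for (i = 0; i < got; i++)	// undo the partial pin
 *			put_page(pages[i]);
 *		return -EFAULT;			// or retry via the slow path
 *	}
 */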

int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	/*
	 * XXX: batch / limit 'nr' to avoid large irq-off latency.  This
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (e.g. DB2), and whether limiting the batch
	 * size will decrease performance.
	 *
	 * It seems like we're in the clear for the moment.  Direct-IO is
	 * the main user that batches up lots of get_user_pages, and even
	 * it is limited to 64 pages at a time, which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables from being freed on sparc.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
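	/*
	 * Why disabling IRQs suffices (an assumption about the mechanism,
	 * not stated in this file): freeing a page-table page requires a
	 * cross-CPU synchronization step, e.g. an IPI-based TLB shootdown
	 * or an RCU-sched grace period, and neither can complete while
	 * this CPU runs with interrupts disabled, so the tables we
	 * dereference below stay allocated for the duration of the walk.
	 */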
	local_irq_disable();

	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);

	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;

slow:
		local_irq_enable();

		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start,
			(end - start) >> PAGE_SHIFT, pages,
			write ? FOLL_WRITE : 0);

		/*
		 * Have to be a bit careful with return values: if the
		 * fast path pinned some pages before falling back, report
		 * at least those, so the caller knows to release them
		 * even when the slow path errored out.
		 */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}
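
/*
 * A hypothetical usage sketch (not part of this file), showing the
 * typical pin/use/release pattern around get_user_pages_fast().  The
 * function name and the CHUNK bound are assumptions for illustration,
 * echoing the batching concern in the XXX comment above:
 *
 *	#define CHUNK	64	// bound irq-off latency per call
 *
 *	long pin_user_buffer(unsigned long uaddr, int nr_pages,
 *			     struct page **pages)
 *	{
 *		int done = 0;
 *
 *		while (done < nr_pages) {
 *			int n = min(nr_pages - done, CHUNK);
 *			int got = get_user_pages_fast(
 *					uaddr + ((unsigned long)done << PAGE_SHIFT),
 *					n, 1, pages + done);
 *			if (got < 0)
 *				goto err;
 *			done += got;
 *			if (got < n)	// partial pin: fault or hole
 *				goto err;
 *		}
 *		return done;
 *	err:
 *		while (done--)
 *			put_page(pages[done]);
 *		return -EFAULT;
 *	}
 */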