/*
 * arch/sparc/mm/gup.c
 *
 * Lockless get_user_pages_fast for sparc, cribbed from powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance-critical leaf functions are made noinline, otherwise gcc
 * inlines everything into a single function, which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask, result;
        pte_t *ptep;

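        /*
         * Build the set of PTE bits a mapping must have (result) and the
         * set we test against (mask).  _PAGE_SPECIAL is in mask but not
         * in result, so special mappings, which have no backing struct
         * page, fail the comparison below and are left to the slow path.
         */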
        if (tlb_type == hypervisor) {
                result = _PAGE_PRESENT_4V|_PAGE_P_4V;
                if (write)
                        result |= _PAGE_WRITE_4V;
        } else {
                result = _PAGE_PRESENT_4U|_PAGE_P_4U;
                if (write)
                        result |= _PAGE_WRITE_4U;
        }
        mask = result | _PAGE_SPECIAL;

        ptep = pte_offset_kernel(&pmd, addr);
        do {
                struct page *page, *head;
                pte_t pte = *ptep;

                if ((pte_val(pte) & mask) != result)
                        return 0;
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

                /* The hugepage case is simplified on sparc64 because
                 * we encode the sub-page pfn offsets into the
                 * hugepage PTEs.  We could optimize this in the future
                 * to use page_cache_add_speculative() for the hugepage
                 * case.
                 */
                page = pte_page(pte);
                head = compound_head(page);
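                /*
                 * Take a speculative reference on the compound head,
                 * then re-check that the PTE did not change under us;
                 * if it did, drop the reference and fall back to the
                 * slow path.
                 */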
                if (!page_cache_get_speculative(head))
                        return 0;
                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                        put_page(head);
                        return 0;
                }

                pages[*nr] = page;
                (*nr)++;
        } while (ptep++, addr += PAGE_SIZE, addr != end);

        return 1;
}

static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
                        unsigned long end, int write, struct page **pages,
                        int *nr)
{
        struct page *head, *page;
        int refs;

        if (!(pmd_val(pmd) & _PAGE_VALID))
                return 0;

        if (write && !pmd_write(pmd))
                return 0;

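        /*
         * (addr & ~PMD_MASK) >> PAGE_SHIFT indexes the first sub-page
         * of the huge mapping covered by [addr, end); one reference
         * per sub-page is then taken on the head page in a single
         * atomic add below.
         */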
        refs = 0;
        head = pmd_page(pmd);
        page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);

        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
        }

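        /*
         * A failed speculative add takes no references, so only *nr
         * needed unwinding above.  From here on the references have
         * been taken, so a stale PMD requires dropping them as well.
         */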
        if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
                *nr -= refs;
                while (refs--)
                        put_page(head);
                return 0;
        }

        return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pmd_t *pmdp;

        pmdp = pmd_offset(&pud, addr);
        do {
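                /* Snapshot the PMD so every check below sees one value. */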
                pmd_t pmd = *pmdp;

                next = pmd_addr_end(addr, end);
                if (pmd_none(pmd))
                        return 0;
                if (unlikely(pmd_large(pmd))) {
                        if (!gup_huge_pmd(pmdp, pmd, addr, next,
                                          write, pages, nr))
                                return 0;
                } else if (!gup_pte_range(pmd, addr, next, write,
                                          pages, nr))
                        return 0;
        } while (pmdp++, addr = next, addr != end);

        return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pud_t *pudp;

        pudp = pud_offset(&pgd, addr);
        do {
                pud_t pud = *pudp;

                next = pud_addr_end(addr, end);
                if (pud_none(pud))
                        return 0;
                if (!gup_pmd_range(pud, addr, next, write, pages, nr))
                        return 0;
        } while (pudp++, addr = next, addr != end);

        return 1;
}

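/*
 * Like get_user_pages_fast() below, but never falls back to the slow
 * path: it returns the number of pages pinned, which may be fewer than
 * requested (or zero) if any level of the walk cannot be handled
 * locklessly.
 */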
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
                          struct page **pages)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
        unsigned long next, flags;
        pgd_t *pgdp;
        int nr = 0;

        start &= PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;

        local_irq_save(flags);
        pgdp = pgd_offset(mm, addr);
        do {
                pgd_t pgd = *pgdp;

                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
                        break;
                if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
                        break;
        } while (pgdp++, addr = next, addr != end);
        local_irq_restore(flags);

        return nr;
}

int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
        unsigned long next;
        pgd_t *pgdp;
        int nr = 0;

        start &= PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;

        /*
         * XXX: batch / limit 'nr', to avoid large irq off latency
         * needs some instrumenting to determine the common sizes used by
         * important workloads (e.g. DB2), and whether limiting the batch size
         * will decrease performance.
         *
         * It seems like we're in the clear for the moment. Direct-IO is
         * the main guy that batches up lots of get_user_pages, and even
         * they are limited to 64-at-a-time which is not so many.
         */
        /*
         * This doesn't prevent pagetable teardown, but does prevent
         * the pagetables from being freed on sparc.
         *
         * So long as we atomically load page table pointers versus teardown,
         * we can follow the address down to the page and take a ref on it.
         */
        local_irq_disable();

        pgdp = pgd_offset(mm, addr);
        do {
                pgd_t pgd = *pgdp;

                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
                        goto slow;
                if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
                        goto slow;
        } while (pgdp++, addr = next, addr != end);

        local_irq_enable();

        VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
        return nr;

        {
                int ret;

slow:
                local_irq_enable();

                /* Try to get the remaining pages with get_user_pages */
                start += nr << PAGE_SHIFT;
                pages += nr;

                ret = get_user_pages_unlocked(start,
                        (end - start) >> PAGE_SHIFT, pages,
                        write ? FOLL_WRITE : 0);

                /*
                 * Have to be a bit careful with return values: if the
                 * fast path already pinned some pages, report that
                 * partial success even when the slow path failed.
                 */
                if (nr > 0) {
                        if (ret < 0)
                                ret = nr;
                        else
                                ret += nr;
                }

                return ret;
        }
}
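
/*
 * Usage sketch (not part of the original file): a hypothetical caller
 * pinning a user buffer before I/O might look like the following.
 * The buffer address 'uaddr', the page count, and the error policy
 * are all assumed for illustration.
 *
 *      struct page *pages[16];
 *      int i, got;
 *
 *      got = get_user_pages_fast(uaddr, 16, 1, pages);
 *      if (got < 16) {
 *              for (i = 0; i < got; i++)
 *                      put_page(pages[i]);
 *              return -EFAULT;
 *      }
 *      ... do the I/O, then put_page() each page ...
 */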