/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#define PAGE_SHIFT_64K  16
#define PAGE_SHIFT_16M  24
#define PAGE_SHIFT_16G  34

#define MAX_NUMBER_GPAGES       1024

/* Tracks the 16G pages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  */
static unsigned long gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;

/* Flag to mark huge PD pointers.  This means pmd_bad() and pud_bad()
 * will choke on pointers to hugepte tables, which is handy for
 * catching screwups early. */

static inline int shift_to_mmu_psize(unsigned int shift)
{
        int psize;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
                if (mmu_psize_defs[psize].shift == shift)
                        return psize;
        return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
        if (mmu_psize_defs[mmu_psize].shift)
                return mmu_psize_defs[mmu_psize].shift;
        BUG();
}

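/*
 * A huge page directory (hugepd) entry does not point at a normal
 * page-table page.  Instead the pd word packs two things: the low bits
 * (HUGEPD_SHIFT_MASK) hold the page shift of the hugepages mapped below
 * this entry, and the remaining bits hold the kernel virtual address of
 * the hugepte table with the top address bit cleared (see
 * __hugepte_alloc() below).  That cleared bit is the "flag" the comment
 * near the top of the file refers to: pmd_bad()/pud_bad() reject such an
 * entry, and it gives is_hugepd() a way to recognise these entries.
 * hugepd_page() ORs the linear-map base (0xc000000000000000) back in to
 * recover the table address.
 *
 * Illustrative example (assuming HUGEPD_SHIFT_MASK is 0x3f and a
 * hypothetical hugepte table at 0xc000000001234000): for 16M pages
 * (shift 24 == 0x18) the stored value is pd == 0x4000000001234018, so
 * hugepd_shift() yields 24 and hugepd_page() yields 0xc000000001234000.
 */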
#define hugepd_none(hpd)        ((hpd).pd == 0)

static inline pte_t *hugepd_page(hugepd_t hpd)
{
        BUG_ON(!hugepd_ok(hpd));
        return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | 0xc000000000000000);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
        return hpd.pd & HUGEPD_SHIFT_MASK;
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, unsigned pdshift)
{
        unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
        pte_t *dir = hugepd_page(*hpdp);

        return dir + idx;
}

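/*
 * Walk the page tables for @ea.  If a hugepd entry is found at any level
 * the walk stops there: the returned pointer is the hugepte within that
 * directory and *shift is set to the hugepage shift.  For an ordinary
 * mapping the normal PTE pointer is returned and *shift is left at 0.
 * Returns NULL if nothing is mapped at this address.
 */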
pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pdshift = PGDIR_SHIFT;

        if (shift)
                *shift = 0;

        pg = pgdir + pgd_index(ea);
        if (is_hugepd(pg)) {
                hpdp = (hugepd_t *)pg;
        } else if (!pgd_none(*pg)) {
                pdshift = PUD_SHIFT;
                pu = pud_offset(pg, ea);
                if (is_hugepd(pu))
                        hpdp = (hugepd_t *)pu;
                else if (!pud_none(*pu)) {
                        pdshift = PMD_SHIFT;
                        pm = pmd_offset(pu, ea);
                        if (is_hugepd(pm))
                                hpdp = (hugepd_t *)pm;
                        else if (!pmd_none(*pm)) {
                                return pte_offset_map(pm, ea);
                        }
                }
        }

        if (!hpdp)
                return NULL;

        if (shift)
                *shift = hugepd_shift(*hpdp);
        return hugepte_offset(hpdp, ea, pdshift);
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
}

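/*
 * Allocate a hugepte table for the directory entry at @hpdp and install
 * it.  The table comes from the pgtable cache sized for (pdshift - pshift)
 * index bits; if another thread installed a table first we simply free
 * ours again.  The install is done under mm->page_table_lock.
 */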
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned pdshift, unsigned pshift)
{
        pte_t *new = kmem_cache_zalloc(PGT_CACHE(pdshift - pshift),
                                       GFP_KERNEL|__GFP_REPEAT);

        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

        if (!new)
                return -ENOMEM;

        spin_lock(&mm->page_table_lock);
        if (!hugepd_none(*hpdp))
                kmem_cache_free(PGT_CACHE(pdshift - pshift), new);
        else
                hpdp->pd = ((unsigned long)new & ~0x8000000000000000) | pshift;
        spin_unlock(&mm->page_table_lock);
        return 0;
}

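/*
 * Find (or create) the hugepte for @addr in a mapping of hugepage size
 * @sz.  The level at which the hugepd lives depends on the size: sizes
 * of at least PUD_SHIFT are installed directly in the PGD, sizes of at
 * least PMD_SHIFT in a PUD, and anything smaller in a PMD.
 */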
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;

        addr &= ~(sz-1);

        pg = pgd_offset(mm, addr);
        if (pshift >= PUD_SHIFT) {
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift >= PMD_SHIFT) {
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        hpdp = (hugepd_t *)pm;
                }
        }

        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
                return NULL;

        return hugepte_offset(hpdp, addr, pdshift);
}

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy or bootmem allocator is set up.
 */
void add_gpage(unsigned long addr, unsigned long page_size,
        unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                number_of_pages--;
                addr += page_size;
        }
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}

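/* Page table sharing for hugepages is not implemented on powerpc, so
 * there is never anything to unshare. */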
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

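/*
 * Free the hugepte table hanging off *hpdp, provided the region being
 * torn down really covers the whole directory entry: as in the normal
 * free_pgd_range(), the floor and ceiling of the enclosing gap are used
 * to decide whether the entry may be cleared at all.
 */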
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        pte_t *hugepte = hugepd_page(*hpdp);
        unsigned shift = hugepd_shift(*hpdp);
        unsigned long pdmask = ~((1UL << pdshift) - 1);

        start &= pdmask;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= pdmask;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        hpdp->pd = 0;
        tlb->need_flush = 1;
        pgtable_free_tlb(tlb, hugepte, pdshift - shift);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd))
                        continue;
                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
        } while (pmd++, addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (!is_hugepd(pud)) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (pud++, addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * Because there are a number of different possible pagetable
         * layouts for hugepage ranges, we limit knowledge of how
         * things should be laid out to the allocation path
         * (huge_pte_alloc(), above).  Everything else works out the
         * structure as it goes from information in the hugepd
         * pointers.  That means that we can't here use the
         * optimization used in the normal page free_pgd_range(), of
         * checking whether we're actually covering a large enough
         * range to have to do anything at the top level of the walk
         * instead of at the bottom.
         *
         * To make sense of this, you should probably go read the big
         * block comment at the top of the normal free_pgd_range(),
         * too.
         */

        pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (!is_hugepd(pgd)) {
                        if (pgd_none_or_clear_bad(pgd))
                                continue;
                        hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
                } else {
                        free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (pgd++, addr = next, addr != end);
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        pte_t *ptep;
        struct page *page;
        unsigned shift;
        unsigned long mask;

        ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);

        /* Verify it is a huge page else bail. */
        if (!ptep || !shift)
                return ERR_PTR(-EINVAL);

        mask = (1UL << shift) - 1;
        page = pte_page(*ptep);
        if (page)
                page += (address & mask) / PAGE_SIZE;

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        BUG();
        return NULL;
}

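/*
 * Lockless get_user_pages_fast() helper for one hugepte.  A speculative
 * reference is taken on the head page for all subpages covered, then the
 * hugepte is re-checked; if it changed underneath us the references are
 * dropped and 0 is returned so the caller falls back to the slow path.
 */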
static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                       unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask;
        unsigned long pte_end;
        struct page *head, *page;
        pte_t pte;
        int refs;

        pte_end = (addr + sz) & ~(sz-1);
        if (pte_end < end)
                end = pte_end;

        pte = *ptep;
        mask = _PAGE_PRESENT | _PAGE_USER;
        if (write)
                mask |= _PAGE_RW;

        if ((pte_val(pte) & mask) != mask)
                return 0;

        /* hugepages are never "special" */
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);

        page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);

        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
        }

        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                /* The hugepte changed under us: undo the references we
                 * just took on the head page and make the caller fall
                 * back to the slow path.  Could be optimized better. */
                *nr -= refs;
                while (refs--)
                        put_page(head);
                return 0;
        }

        return 1;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
                                      unsigned long sz)
{
        unsigned long __boundary = (addr + sz) & ~(sz-1);
        return (__boundary - 1 < end - 1) ? __boundary : end;
}

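/* Walk every hugepte under one hugepd entry on behalf of
 * get_user_pages_fast(). */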
int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
               unsigned long addr, unsigned long end,
               int write, struct page **pages, int *nr)
{
        pte_t *ptep;
        unsigned long sz = 1UL << hugepd_shift(*hugepd);
        unsigned long next;

        ptep = hugepte_offset(hugepd, addr, pdshift);
        do {
                next = hugepte_addr_end(addr, end, sz);
                if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
                        return 0;
        } while (ptep++, addr = next, addr != end);

        return 1;
}

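/* Hugepage mappings must live in address-space slices of the matching
 * MMU page size, so placement is deferred to the slice allocator. */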
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

        return 1UL << mmu_psize_to_shift(psize);
}

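/*
 * Register one hugepage size, either from the hugepagesz= command line
 * option or from hugetlbpage_init() below.  The size must be a power of
 * two that the MMU supports and that fits within the slice and
 * page-table limits; a matching hstate is added unless one exists.
 */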
static int __init add_huge_page_size(unsigned long long size)
{
        int shift = __ffs(size);
        int mmu_psize;

        /* Check that it is a page size supported by the hardware and
         * that it fits within pagetable and slice limits. */
        if (!is_power_of_2(size)
            || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
                return -EINVAL;

        if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
                return -EINVAL;

#ifdef CONFIG_SPU_FS_64K_LS
        /* Disable support for 64K huge pages when 64K SPU local store
         * support is enabled as the current implementation conflicts.
         */
        if (shift == PAGE_SHIFT_64K)
                return -EINVAL;
#endif /* CONFIG_SPU_FS_64K_LS */

        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

        /* Return if this huge page size has already been set up */
        if (size_to_hstate(size))
                return 0;

        hugetlb_add_hstate(shift - PAGE_SHIFT);

        return 0;
}

static int __init hugepage_setup_sz(char *str)
{
        unsigned long long size;

        size = memparse(str, &str);

        if (add_huge_page_size(size) != 0)
                printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);

        return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

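/*
 * Boot-time setup: register every hugepage size the MMU supports and
 * create a pgtable cache for each hugepte-table geometry, then pick the
 * default hugepage size (16M if available, otherwise 1M).
 */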
static int __init hugetlbpage_init(void)
{
        int psize;

        if (!cpu_has_feature(CPU_FTR_16M_PAGE))
                return -ENODEV;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;
                unsigned pdshift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

                if (add_huge_page_size(1ULL << shift) < 0)
                        continue;

                if (shift < PMD_SHIFT)
                        pdshift = PMD_SHIFT;
                else if (shift < PUD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PGDIR_SHIFT;

                pgtable_cache_add(pdshift - shift, NULL);
                if (!PGT_CACHE(pdshift - shift))
                        panic("hugetlbpage_init(): could not create "
                              "pgtable cache for %d bit pagesize\n", shift);
        }

        /* Set default large page size. Currently, we pick 16M or 1M
         * depending on what is available
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

        return 0;
}

module_init(hugetlbpage_init);

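/* Flush the data and instruction caches for every base-size subpage of a
 * compound hugepage, e.g. after the page has been cleared or copied. */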
void flush_dcache_icache_hugepage(struct page *page)
{
        int i;

        BUG_ON(!PageCompound(page));

        for (i = 0; i < (1UL << compound_order(page)); i++)
                __flush_dcache_icache(page_address(page+i));
}