arch/ia64/mm/hugetlbpage.c
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add NUMA support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
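
/*
 * Shift for the current huge page size; defaults to HPAGE_SHIFT_DEFAULT
 * and can be changed at boot with the "hugepagesz=" parameter handled
 * below.
 */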
unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
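
/*
 * Walk (and allocate, where missing) the pgd/pud/pmd levels for the
 * huge page mapping of @addr; the address is first scaled down into
 * the normally-addressed range via htlbpage_to_page().  The @sz
 * argument is unused here since ia64 has a single, boot-selected
 * huge page size.
 */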
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}
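
/*
 * Look up the pte for the huge page mapping of @addr without
 * allocating; returns NULL if any intermediate level is not present.
 */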
pte_t *
huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}
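
/*
 * ia64 does not share huge page pmds, so there is never anything to
 * unshare here.
 */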
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
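
/* The only thing mk_pte_huge() must do on ia64 is set the present bit. */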
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * No actual preparation is needed; just check that the length and
 * address are huge-page aligned and that the address lies in the
 * huge page region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}
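
/*
 * Translate a user address in the huge page region to its struct page,
 * stepping to the correct base page within the huge page.
 */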
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
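
/*
 * Huge pages on ia64 live in their own region and are mapped with
 * ordinary ptes, so nothing is "huge" at the pmd or pud level and
 * there is no pmd-based lookup to follow.
 */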
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range() will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; if outside, they are left
	 * unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end  = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}
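
/*
 * Find a free, HPAGE_SIZE-aligned range in the huge page region by
 * walking the vma list linearly from the first candidate address.
 */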
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}
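
/*
 * Parse the "hugepagesz=" boot parameter: the requested size must be a
 * power of two supported by the CPU's TLB (as reported by PAL), larger
 * than PAGE_SIZE, and small enough for the page allocator.
 */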
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * Shouldn't happen, but fall back to a sane default
		 * mask of supported TLB page sizes just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU has already executed ia64_mmu_init() and set up
	 * the huge page region for HPAGE_SHIFT_DEFAULT; override that
	 * here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);
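
/*
 * Example: booting with "hugepagesz=256M" selects 256 MB huge pages,
 * provided PAL reports 256 MB as a supported TLB page size and the
 * size fits within the page allocator's MAX_ORDER limit.
 */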