arch/x86/mm/pageattr.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

/*
 * Flush a virtual address range from the CPU caches, one cache line
 * at a time:
 */
void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr + i);
}

/*
 * We allow the BIOS range to be executable:
 */
#define BIOS_BEGIN		0x000a0000
#define BIOS_END		0x00100000

static inline pgprot_t check_exec(pgprot_t prot, unsigned long address)
{
	if (__pa(address) >= BIOS_BEGIN && __pa(address) < BIOS_END)
		pgprot_val(prot) &= ~_PAGE_NX;
	/*
	 * Better fail early if someone sets the kernel text to NX.
	 * Does not cover __inittext.
	 */
	BUG_ON(address >= (unsigned long)&_text &&
	       address < (unsigned long)&_etext &&
	       (pgprot_val(prot) & _PAGE_NX));

	return prot;
}

/*
 * Look up the kernel page table entry for a virtual address and report
 * the mapping level (PG_LEVEL_NONE, PG_LEVEL_2M for a large page, or
 * PG_LEVEL_4K) in *level. Returns NULL if no mapping exists, otherwise
 * a pointer to the (possibly large-page) pte.
 */
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}

static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		/*
		 * Without a shared kernel PMD the change has to be
		 * propagated into every pgd on the pgd_list:
		 */
		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

/*
 * Split one large kernel mapping into PTRS_PER_PTE 4k mappings that
 * preserve the original protection bits:
 */
static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split-up page table:
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}

static int
__change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

	BUG_ON(PageHighMem(page));

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = check_exec(prot, address);

	if (level == PG_LEVEL_4K) {
		set_pte_atomic(kpte, mk_pte(page, canon_pgprot(prot)));
	} else {
		/* Split the large page first, then retry at the 4k level: */
		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot:    New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0, i;

#ifdef CONFIG_X86_64
	if (address >= __START_KERNEL_map &&
			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn_to_page(pfn), prot);
			if (err)
				break;
		}
#ifdef CONFIG_X86_64
		/*
		 * Handle the kernel text mapping too, which aliases part of
		 * lowmem:
		 */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;

			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn_to_page(pfn), prot2);
		}
#endif
	}

	return err;
}
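
/*
 * Illustrative sketch, not part of the original file: the expected call
 * sequence when using change_page_attr_addr() on a physical range that is
 * covered by the direct mapping. The helper name and the use of
 * PAGE_KERNEL_NOCACHE are assumptions made only for this example.
 */
static inline int example_set_range_uncached(unsigned long phys_addr,
					     int numpages)
{
	/* Direct-mapping virtual address corresponding to phys_addr: */
	unsigned long vaddr = (unsigned long)__va(phys_addr);
	int err = change_page_attr_addr(vaddr, numpages, PAGE_KERNEL_NOCACHE);

	/* The new attributes only take effect after a global TLB flush: */
	if (!err)
		global_flush_tlb();

	return err;
}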

/**
 * change_page_attr - Change page table attributes in the linear mapping.
 * @page: First page to change
 * @numpages: Number of pages to change
 * @prot: New protection/caching type (PAGE_*)
 *
 * Returns 0 on success, otherwise a negative errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * For MMIO areas without mem_map use change_page_attr_addr() instead.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);
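
/*
 * Illustrative sketch, not part of the original file: a caller that maps a
 * page with a non write-back policy elsewhere and therefore switches the
 * kernel linear mapping to match, restoring it afterwards. The helper name
 * and the use of PAGE_KERNEL_NOCACHE are assumptions; the global_flush_tlb()
 * calls follow the rule documented in the comment above.
 */
static inline void example_uncached_window(struct page *page)
{
	if (change_page_attr(page, 1, PAGE_KERNEL_NOCACHE) == 0)
		global_flush_tlb();

	/* ... access the page through the uncached mapping here ... */

	if (change_page_attr(page, 1, PAGE_KERNEL) == 0)
		global_flush_tlb();
}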

static void flush_kernel_map(void *arg)
{
	/*
	 * Flush everything to work around an erratum in early Athlons
	 * regarding large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail, because
	 * large pages are disabled at boot time:
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * We should perform an IPI and flush all TLBs, but that can
	 * deadlock, so flush only the current CPU:
	 */
	__flush_tlb_all();
}
#endif