x86: return the page table level in lookup_address()
arch/x86/mm/pageattr_64.c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/*
 * Look up the kernel pte mapping @address.  Returns NULL if the address is
 * not mapped.  Otherwise the pte pointer is returned (for a large page this
 * actually points at the pmd entry) and *level is set to the page table
 * level the mapping was found at: 3 for a 2MB pmd, 4 for a 4K pte.
 */
pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        *level = 3;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        *level = 4;

        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;

        return pte;
}
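
/*
 * Illustrative sketch only (not part of this file): how a caller might use
 * the level returned by lookup_address() to tell a large pmd mapping apart
 * from a regular 4K pte.  The helper name and the printks are hypothetical.
 */
static void __maybe_unused example_show_mapping(unsigned long address)
{
        int level;
        pte_t *kpte = lookup_address(address, &level);

        if (!kpte)
                printk(KERN_DEBUG "0x%lx not mapped\n", address);
        else if (level == 3)
                printk(KERN_DEBUG "0x%lx mapped by 2MB page (pte %lx)\n",
                       address, pte_val(*kpte));
        else    /* level == 4 */
                printk(KERN_DEBUG "0x%lx mapped by 4K page (pte %lx)\n",
                       address, pte_val(*kpte));
}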

static struct page *
split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
{
        unsigned long addr;
        struct page *base;
        pte_t *pbase;
        int i;

        base = alloc_pages(GFP_KERNEL, 0);
        if (!base)
                return NULL;
        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}

void clflush_cache_range(void *addr, int size)
{
        int i;

        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
                clflush(addr+i);
}

static void flush_kernel_map(void *arg)
{
        struct list_head *l = (struct list_head *)arg;
        struct page *pg;

        /* When clflush is available always use it because it is
           much cheaper than WBINVD. */
        /* clflush is still broken. Disable for now. */
        if (1 || !cpu_has_clflush) {
                asm volatile("wbinvd" ::: "memory");
        } else {
                list_for_each_entry(pg, l, lru) {
                        void *addr = page_address(pg);

                        clflush_cache_range(addr, PAGE_SIZE);
                }
        }
        __flush_tlb_all();
}

static inline void flush_map(struct list_head *l)
{
        on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

/*
 * Defer this page table page to the deferred_pages list; global_flush_tlb()
 * will flush it and free it if no specially-protected entries remain.
 * PG_arch_1 guards against queueing the same page twice.
 */
static inline void save_page(struct page *fpage)
{
        if (!test_and_set_bit(PG_arch_1, &fpage->flags))
                list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        unsigned long pfn;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
        large_pte = pfn_pte(pfn, ref_prot);
        large_pte = pte_mkhuge(large_pte);

        set_pte((pte_t *)pmd, large_pte);
}

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        struct page *kpte_page;
        pgprot_t ref_prot2;
        pte_t *kpte;
        int level;

        kpte = lookup_address(address, &level);
        if (!kpte)
                return 0;

        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if (!pte_huge(*kpte)) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;

                        ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        pgprot_val(ref_prot2) &= ~_PAGE_NX;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else {
                if (!pte_huge(*kpte)) {
                        set_pte(kpte, pfn_pte(pfn, ref_prot));
                        BUG_ON(page_private(kpte_page) == 0);
                        page_private(kpte_page)--;
                } else
                        BUG();
        }

        /* on x86-64 the direct mapping set at boot is not using 4k pages */
        BUG_ON(PageReserved(kpte_page));

        save_page(kpte_page);
        if (page_private(kpte_page) == 0)
                revert_page(address, ref_prot);
        return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0, kernel_map = 0, i;

        if (address >= __START_KERNEL_map &&
                        address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

                address = (unsigned long)__va(__pa(address));
                kernel_map = 1;
        }

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                if (!kernel_map || pte_present(pfn_pte(0, prot))) {
                        err = __change_page_attr(address, pfn, prot,
                                                PAGE_KERNEL);
                        if (err)
                                break;
                }
                /* Handle the kernel mapping too, which aliases part of
                 * lowmem */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2;

                        addr2 = __START_KERNEL_map + __pa(address);
                        /* Make sure the kernel mappings stay executable */
                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                        err = __change_page_attr(addr2, pfn, prot2,
                                                 PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);

        return err;
}
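
/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * temporarily make one linear-map page uncacheable and restore it later.
 * Per the comment above change_page_attr_addr(), every change must be
 * followed by global_flush_tlb().
 */
static int __maybe_unused example_toggle_page_cacheability(void *buf)
{
        unsigned long addr = (unsigned long)buf;
        int err;

        err = change_page_attr_addr(addr, 1, PAGE_KERNEL_NOCACHE);
        global_flush_tlb();
        if (err)
                return err;

        /* ... the page can now be used through an uncached mapping ... */

        err = change_page_attr_addr(addr, 1, PAGE_KERNEL);
        global_flush_tlb();
        return err;
}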

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);

void global_flush_tlb(void)
{
        struct page *pg, *next;
        struct list_head l;

        /*
         * Write-protect the semaphore, to exclude two contexts
         * doing a list_replace_init() call in parallel and to
         * exclude new additions to the deferred_pages list:
         */
        down_write(&init_mm.mmap_sem);
        list_replace_init(&deferred_pages, &l);
        up_write(&init_mm.mmap_sem);

        flush_map(&l);

        list_for_each_entry_safe(pg, next, &l, lru) {
                list_del(&pg->lru);
                clear_bit(PG_arch_1, &pg->flags);
                if (page_private(pg) != 0)
                        continue;
                ClearPagePrivate(pg);
                __free_page(pg);
        }
}
EXPORT_SYMBOL(global_flush_tlb);