arch/x86/mm/pageattr_32.c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);

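/*
 * Walk the kernel page tables for the given virtual address.  Returns NULL
 * if no mapping exists, a pointer to the pte mapping the address, or the
 * pmd entry cast to a pte_t * when the address is covered by a 2/4MB
 * large page.
 */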
pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;

        return pte_offset_kernel(pmd, address);
}

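/*
 * Build a replacement 4k page table for the large page covering the given
 * address: the pte covering the address gets "prot", all others get
 * "ref_prot".  Returns the new page-table page, or NULL if the allocation
 * failed.  cpa_lock is dropped around the allocation; the caller installs
 * the result with set_pmd_pte().
 */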
static struct page *
split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
{
        unsigned long addr;
        struct page *base;
        pte_t *pbase;
        int i;

        spin_unlock_irq(&cpa_lock);
        base = alloc_pages(GFP_KERNEL, 0);
        spin_lock_irq(&cpa_lock);
        if (!base)
                return NULL;

        /*
         * page_private is used to track the number of entries in
         * the page table page that have non standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));

        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
                                           addr == address ? prot : ref_prot));
        }
        return base;
}

static void cache_flush_page(struct page *p)
{
        void *addr = page_address(p);
        int i;

        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                clflush(addr + i);
}

static void flush_kernel_map(void *arg)
{
        struct list_head *lh = (struct list_head *)arg;
        struct page *p;

        /* High level code is not ready for clflush yet */
        if (0 && cpu_has_clflush) {
                list_for_each_entry(p, lh, lru)
                        cache_flush_page(p);
        } else {
                if (boot_cpu_data.x86_model >= 4)
                        wbinvd();
        }

        /*
         * Flush all TLB entries to work around errata in early Athlons
         * regarding large page flushing.
         */
        __flush_tlb_all();
}

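/*
 * Install a pte (or, for a large page, a pmd) in the kernel mapping.
 * init_mm is always updated; when the kernel pmds are not shared across
 * pgds (PAE without SHARED_KERNEL_PMD), the change is also propagated to
 * every pgd on pgd_list.
 */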
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        unsigned long flags;
        struct page *page;

        /* change init_mm */
        set_pte_atomic(kpte, pte);
        if (SHARED_KERNEL_PMD)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        for (page = pgd_list; page; page = (struct page *)page->index) {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;

                pgd = (pgd_t *)page_address(page) + pgd_index(address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                set_pte_atomic((pte_t *)pmd, pte);
        }
        spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * No more special protections in this 2/4MB area - revert to a large
 * page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
        pgprot_t ref_prot;
        pte_t *linear;

        ref_prot =
        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;

        linear = (pte_t *)
                pmd_offset(pud_offset(pgd_offset_k(address), address), address);
        set_pmd_pte(linear, address,
                    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
                            ref_prot));
}

static inline void save_page(struct page *kpte_page)
{
        if (!test_and_set_bit(PG_arch_1, &kpte_page->flags))
                list_add(&kpte_page->lru, &df_list);
}

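/*
 * Change the attributes of a single kernel page.  Large pages are split
 * as needed; page_private() of the page-table page counts how many of its
 * ptes carry non-default attributes, and when that count drops back to
 * zero the 4k ptes are reverted to a single large page.  Touched
 * page-table pages are queued on df_list for global_flush_tlb().
 */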
static int __change_page_attr(struct page *page, pgprot_t prot)
{
        struct page *kpte_page;
        unsigned long address;
        pte_t *kpte;

        BUG_ON(PageHighMem(page));
        address = (unsigned long)page_address(page);

        kpte = lookup_address(address);
        if (!kpte)
                return -EINVAL;

        kpte_page = virt_to_page(kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));

        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
                if (!pte_huge(*kpte)) {
                        set_pte_atomic(kpte, mk_pte(page, prot));
                } else {
                        struct page *split;
                        pgprot_t ref_prot;

                        ref_prot =
                        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                                ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
                        split = split_large_page(address, prot, ref_prot);
                        if (!split)
                                return -ENOMEM;

                        set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else {
                if (!pte_huge(*kpte)) {
                        set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                        BUG_ON(page_private(kpte_page) == 0);
                        page_private(kpte_page)--;
                } else
                        BUG();
        }

        /*
         * If the pte was reserved, it means it was created at boot
         * time (not via split_large_page) and in turn we must not
         * replace it with a large page.
         */

        save_page(kpte_page);
        if (!PageReserved(kpte_page)) {
                if (cpu_has_pse && (page_private(kpte_page) == 0)) {
                        paravirt_release_pt(page_to_pfn(kpte_page));
                        revert_page(kpte_page, address);
                }
        }
        return 0;
}

static inline void flush_map(struct list_head *l)
{
        on_each_cpu(flush_kernel_map, l, 1, 1);
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long flags;
        int err = 0, i;

        spin_lock_irqsave(&cpa_lock, flags);
        for (i = 0; i < numpages; i++, page++) {
                err = __change_page_attr(page, prot);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&cpa_lock, flags);

        return err;
}
EXPORT_SYMBOL(change_page_attr);
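
/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * wants an uncached view of pages it already owns might do roughly the
 * following.  "buf" and "nr_pages" are hypothetical names; the essential
 * point from the comment above is that every change_page_attr() call must
 * be followed by global_flush_tlb() before the new attributes are relied on.
 *
 *      err = change_page_attr(virt_to_page(buf), nr_pages,
 *                             PAGE_KERNEL_NOCACHE);
 *      if (!err)
 *              global_flush_tlb();
 *
 * and later, to restore the default write-back attributes:
 *
 *      change_page_attr(virt_to_page(buf), nr_pages, PAGE_KERNEL);
 *      global_flush_tlb();
 */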

void global_flush_tlb(void)
{
        struct page *pg, *next;
        struct list_head l;

        BUG_ON(irqs_disabled());

        spin_lock_irq(&cpa_lock);
        list_replace_init(&df_list, &l);
        spin_unlock_irq(&cpa_lock);
        flush_map(&l);
        list_for_each_entry_safe(pg, next, &l, lru) {
                list_del(&pg->lru);
                clear_bit(PG_arch_1, &pg->flags);
                if (PageReserved(pg) || !cpu_has_pse || page_private(pg) != 0)
                        continue;
                ClearPagePrivate(pg);
                __free_page(pg);
        }
}
EXPORT_SYMBOL(global_flush_tlb);

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * The return value is ignored - the calls cannot fail,
         * large pages are disabled at boot time.
         */
        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

        /*
         * We should perform an IPI and flush all TLBs, but that can
         * deadlock, so flush only the current CPU's TLB instead.
         */
        __flush_tlb_all();
}
#endif