sfrench/cifs-2.6.git: arch/x86/mm/pageattr_64.c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>

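/*
 * Walk the kernel page tables and return a pointer to the pte that maps
 * 'address', or NULL if no mapping is present.  For a 2MB large page the
 * pmd entry itself is returned, cast to a pte pointer.
 */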
pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;

	return pte;
}

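/*
 * Split a 2MB kernel mapping into 4k ptes: allocate a new page table
 * page and fill it with 4k entries covering the same range, giving only
 * the entry for 'address' the new protection.
 */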
static struct page *
split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
{
	unsigned long addr;
	struct page *base;
	pte_t *pbase;
	int i;

	base = alloc_pages(GFP_KERNEL, 0);
	if (!base)
		return NULL;
	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non-standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : ref_prot);
	}
	return base;
}

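/*
 * Flush the given address range from the CPU caches, one cache line
 * (boot_cpu_data.x86_clflush_size bytes) at a time.
 */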
void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
}

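/*
 * Per-CPU flush handler, run on every CPU via on_each_cpu(): flush the
 * caches for the pages on the passed-in list (or the whole cache via
 * WBINVD) and then flush the TLB.
 */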
static void flush_kernel_map(void *arg)
{
	struct list_head *l = (struct list_head *)arg;
	struct page *pg;

	/*
	 * When clflush is available, always use it because it is much
	 * cheaper than WBINVD.  clflush is still broken here, though, so
	 * it is disabled for now.
	 */
	if (1 || !cpu_has_clflush) {
		asm volatile("wbinvd" ::: "memory");
	} else {
		list_for_each_entry(pg, l, lru) {
			void *addr = page_address(pg);

			clflush_cache_range(addr, PAGE_SIZE);
		}
	}
	__flush_tlb_all();
}

static inline void flush_map(struct list_head *l)
{
	on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

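/*
 * Queue a page table page for flushing/freeing at the next
 * global_flush_tlb().  PG_arch_1 prevents it from being added twice.
 */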
static inline void save_page(struct page *fpage)
{
	if (!test_and_set_bit(PG_arch_1, &fpage->flags))
		list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
	unsigned long pfn;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
	large_pte = pfn_pte(pfn, ref_prot);
	large_pte = pte_mkhuge(large_pte);

	set_pte((pte_t *)pmd, large_pte);
}

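/*
 * Change the protection of the single 4k page at 'address', splitting
 * the covering 2MB page if necessary.  Called with init_mm.mmap_sem held
 * for writing; the actual flush and page freeing are deferred to
 * global_flush_tlb().
 */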
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
		   pgprot_t ref_prot)
{
	struct page *kpte_page;
	pgprot_t ref_prot2;
	pte_t *kpte;

	kpte = lookup_address(address);
	if (!kpte)
		return 0;

	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));
	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
		if (!pte_huge(*kpte)) {
			set_pte(kpte, pfn_pte(pfn, prot));
		} else {
			/*
			 * split_large_page will take the reference for this
			 * change_page_attr on the split page.
			 */
			struct page *split;

			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
			split = split_large_page(address, prot, ref_prot2);
			if (!split)
				return -ENOMEM;
			pgprot_val(ref_prot2) &= ~_PAGE_NX;
			set_pte(kpte, mk_pte(split, ref_prot2));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else {
		if (!pte_huge(*kpte)) {
			set_pte(kpte, pfn_pte(pfn, ref_prot));
			BUG_ON(page_private(kpte_page) == 0);
			page_private(kpte_page)--;
		} else
			BUG();
	}

	/* On x86-64 the direct mapping set up at boot does not use 4k pages. */
	BUG_ON(PageReserved(kpte_page));

	save_page(kpte_page);
	if (page_private(kpte_page) == 0)
		revert_page(address, ref_prot);
	return 0;
}

/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0, i;

	if (address >= __START_KERNEL_map &&
			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

216                 if (!kernel_map || pte_present(pfn_pte(0, prot))) {
217                         err = __change_page_attr(address, pfn, prot,
218                                                 PAGE_KERNEL);
219                         if (err)
220                                 break;
221                 }
222                 /* Handle kernel mapping too which aliases part of the
223                  * lowmem */
224                 if (__pa(address) < KERNEL_TEXT_SIZE) {
225                         unsigned long addr2;
226                         pgprot_t prot2;
227
228                         addr2 = __START_KERNEL_map + __pa(address);
229                         /* Make sure the kernel mappings stay executable */
230                         prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
231                         err = __change_page_attr(addr2, pfn, prot2,
232                                                  PAGE_KERNEL_EXEC);
233                 }
234         }
235         up_write(&init_mm.mmap_sem);
236
237         return err;
238 }
239
/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);
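
/*
 * Illustrative sketch (not part of the original file): one way a caller
 * might pair change_page_attr() with the required global_flush_tlb().
 * The helper name example_set_page_ro() and the use of PAGE_KERNEL_RO are
 * assumptions for the example only.
 */
#if 0
static int example_set_page_ro(struct page *page)
{
	/* Change the attribute of one page in the kernel linear map ... */
	int err = change_page_attr(page, 1, PAGE_KERNEL_RO);

	if (err)
		return err;
	/* ... and flush so the change becomes visible on all CPUs. */
	global_flush_tlb();
	return 0;
}
#endif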
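
/*
 * Flush the deferred attribute changes: flush caches/TLBs on all CPUs
 * and free the split page table pages that no longer carry any
 * non-standard attributes.
 */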
void global_flush_tlb(void)
{
	struct page *pg, *next;
	struct list_head l;

	/*
	 * Take the semaphore for writing, to exclude two contexts
	 * doing a list_replace_init() call in parallel and to
	 * exclude new additions to the deferred_pages list:
	 */
	down_write(&init_mm.mmap_sem);
	list_replace_init(&deferred_pages, &l);
	up_write(&init_mm.mmap_sem);

	flush_map(&l);

	list_for_each_entry_safe(pg, next, &l, lru) {
		list_del(&pg->lru);
		clear_bit(PG_arch_1, &pg->flags);
		if (page_private(pg) != 0)
			continue;
		ClearPagePrivate(pg);
		__free_page(pg);
	}
}
EXPORT_SYMBOL(global_flush_tlb);