arch/x86/mm/pageattr_64.c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr + i);
}

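/*
 * Illustrative sketch, not taken from this file: a caller that has just
 * written data which a device will fetch without snooping the caches might
 * flush the buffer's cache lines explicitly before starting the transfer:
 *
 *	memcpy(buf, data, len);
 *	clflush_cache_range(buf, len);
 *
 * (buf, data and len are hypothetical names; only clflush_cache_range()
 * is the real interface defined above.)
 */
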
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	*level = 3;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	*level = 4;

	return pte_offset_kernel(pmd, address);
}
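
/*
 * Illustrative sketch (addr and large_page are hypothetical names, not part
 * of this file): the level returned by lookup_address() tells the caller
 * whether the address is covered by a 2MB PMD entry (level 3, in which case
 * the returned pointer really points at the pmd) or by a regular 4KB PTE
 * (level 4):
 *
 *	int level;
 *	pte_t *kpte = lookup_address(addr, &level);
 *
 *	if (kpte && level == 3)
 *		large_page = 1;
 */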

/*
 * Allocate a page table page and fill it with 4KB PTEs that cover the 2MB
 * region around @address with @ref_prot, so that a large mapping can be
 * replaced by it.
 */
static struct page *
split_large_page(unsigned long address, pgprot_t ref_prot)
{
	unsigned long addr;
	struct page *base;
	pte_t *pbase;
	int i;

	base = alloc_pages(GFP_KERNEL, 0);
	if (!base)
		return NULL;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, ref_prot);

	return base;
}

/*
 * Change the kernel mapping of @address so that it maps @page with @prot,
 * splitting a 2MB large page into 4KB PTEs first when necessary.
 */
static int
__change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
{
	struct page *kpte_page;
	pte_t *kpte;
	pgprot_t ref_prot2, oldprot;
	int level;

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return 0;

	kpte_page = virt_to_page(kpte);
	oldprot = pte_pgprot(*kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));
	prot = canon_pgprot(prot);

	if (level == 4) {
		set_pte_atomic(kpte, mk_pte(page, prot));
	} else {
		/*
		 * split_large_page will take the reference for this
		 * change_page_attr on the split page.
		 */
		struct page *split;

		ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
		split = split_large_page(address, ref_prot2);
		if (!split)
			return -ENOMEM;
		pgprot_val(ref_prot2) &= ~_PAGE_NX;
		set_pte_atomic(kpte, mk_pte(split, ref_prot2));
		goto repeat;
	}

	return 0;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0, i;

	if (address >= __START_KERNEL_map &&
			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn_to_page(pfn), prot);
			if (err)
				break;
		}
		/*
		 * Handle the kernel mapping too, which aliases part of
		 * lowmem.
		 */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;

			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn_to_page(pfn), prot2);
		}
	}
	up_write(&init_mm.mmap_sem);

	return err;
}
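
/*
 * Illustrative sketch, not part of this file: a driver that wants a range of
 * the direct mapping (for example a memory hole without a mem_map entry) to
 * become uncacheable could do something like the following; addr and npages
 * are hypothetical, the calls themselves are the real interface:
 *
 *	err = change_page_attr_addr(addr, npages, PAGE_KERNEL_NOCACHE);
 *	if (!err)
 *		global_flush_tlb();
 *
 * and later restore the default write-back attribute by repeating the call
 * with PAGE_KERNEL followed by another global_flush_tlb().
 */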

/**
 * change_page_attr - Change page table attributes in the linear mapping.
 * @page: First page to change
 * @numpages: Number of pages to change
 * @prot: New protection/caching type (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped somewhere with a caching policy
 * other than write-back - some CPUs do not like it when mappings with
 * different caching policies exist. It changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * For MMIO areas without a mem_map use change_page_attr_addr() instead.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);
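
/*
 * Illustrative sketch, not part of this file: this mirrors how callers such
 * as the AGP code use the interface - flip a page to an uncached mapping,
 * flush, and later flip it back (page is a hypothetical struct page pointer):
 *
 *	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 *
 *	change_page_attr(page, 1, PAGE_KERNEL);
 *	global_flush_tlb();
 */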

static void flush_kernel_map(void *arg)
{
	/*
	 * Flush everything to work around errata in early Athlons
	 * regarding large-page TLB flushes.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);