[PATCH] x86-64: Remove mk_pte_phys()
arch/x86_64/mm/pageattr.c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

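/*
 * Walk the kernel page tables and return the pte mapping address, or
 * NULL if there is no present mapping.  For a 2MB large page the pmd
 * entry itself is returned, cast to a pte pointer.
 */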
static inline pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}

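/*
 * Replace one 2MB mapping with a freshly allocated page table of 4KB
 * ptes covering the same range.  The pte for @address gets @prot, all
 * other entries keep @ref_prot.  Returns the new page table page, or
 * NULL on allocation failure.
 */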
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;
        if (!base)
                return NULL;
        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}

static void cache_flush_page(void *adr)
{
        int i;
        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                asm volatile("clflush (%0)" :: "r" (adr + i));
}

static void flush_kernel_map(void *arg)
{
        struct list_head *l = (struct list_head *)arg;
        struct page *pg;

        /* When clflush is available always use it because it is
           much cheaper than WBINVD */
        if (!cpu_has_clflush)
                asm volatile("wbinvd" ::: "memory");
        list_for_each_entry(pg, l, lru) {
                void *adr = page_address(pg);
                if (cpu_has_clflush)
                        cache_flush_page(adr);
                __flush_tlb_one(adr);
        }
}

static inline void flush_map(struct list_head *l)
{
        on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

static inline void save_page(struct page *fpage)
{
        list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;
        unsigned long pfn;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd,address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
        large_pte = pfn_pte(pfn, ref_prot);
        large_pte = pte_mkhuge(large_pte);
        set_pte((pte_t *)pmd, large_pte);
}

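/*
 * Change the attributes of a single kernel page.  Called with
 * init_mm.mmap_sem held for writing.  If the page sits inside a 2MB
 * mapping, the large page is split first; when the last non-standard
 * pte in a split page table is reverted, the 2MB mapping is restored
 * and the page table page is queued for freeing in global_flush_tlb().
 */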
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        pgprot_t ref_prot2;
        kpte = lookup_address(address);
        if (!kpte) return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if (!pte_huge(*kpte)) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;
                        ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if (!pte_huge(*kpte)) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /* on x86-64 the direct mapping set at boot is not using 4k pages */
        BUG_ON(PageReserved(kpte_page));

        if (page_private(kpte_page) == 0) {
                save_page(kpte_page);
                revert_page(address, ref_prot);
        }
        return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This also changes the page attributes of
 * the kernel linear mapping.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
                if (err)
                        break;
                /* Handle kernel mapping too which aliases part of the
                 * lowmem */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2;
                        addr2 = __START_KERNEL_map + __pa(address);
                        /* Make sure the kernel mappings stay executable */
                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                        err = __change_page_attr(addr2, pfn, prot2,
                                                 PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);
        return change_page_attr_addr(addr, numpages, prot);
}

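/*
 * Flush caches and TLB entries for the deferred page table pages on
 * all CPUs and free them.  Callers of change_page_attr() must call
 * this afterwards, as documented above.
 */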
void global_flush_tlb(void)
{
        struct page *pg, *next;
        struct list_head l;

        down_read(&init_mm.mmap_sem);
        list_replace_init(&deferred_pages, &l);
        up_read(&init_mm.mmap_sem);

        flush_map(&l);

        list_for_each_entry_safe(pg, next, &l, lru) {
                ClearPagePrivate(pg);
                __free_page(pg);
        }
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
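
/*
 * Hypothetical usage sketch (not part of the original file): a driver
 * that wants one page of the kernel linear mapping to become uncached
 * would pair change_page_attr() with global_flush_tlb() roughly as
 * below.  The example function name is illustrative only.
 */
#if 0
static int example_make_page_uncached(struct page *page)
{
        int err;

        /* Rewrite the kernel linear-map pte(s) for this page. */
        err = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
        if (err)
                return err;

        /* Required after any change_page_attr() call. */
        global_flush_tlb();
        return 0;
}
#endif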