/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

void clflush_cache_range(void *addr, int size)
{
        int i;

        /*
         * clflush writes back and invalidates the cache line containing
         * the given byte address; boot_cpu_data.x86_clflush_size is the
         * line size reported by the CPU. Note that if @addr is not
         * cache-line aligned, the last line of the range can be missed.
         */
        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
                clflush(addr + i);
}
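/*
 * Usage sketch (editor's illustration, not part of the original file):
 * write modified data back to memory before a non-snooping device reads
 * it via DMA. "desc" is a hypothetical, cache-line aligned descriptor:
 *
 *	desc->status = READY;
 *	clflush_cache_range(desc, sizeof(*desc));
 */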
pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        /* 3: the address is mapped by a large (2M/4M) pmd entry */
        *level = 3;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;

        /* 4: the address is mapped by a regular 4k pte */
        *level = 4;
        return pte_offset_kernel(pmd, address);
}
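/*
 * Example (editor's illustration): the level output tells the caller
 * what granularity backs an address:
 *
 *	int level;
 *	pte_t *pte = lookup_address(address, &level);
 *
 *	if (pte && level == 3)
 *		the address is covered by a large mapping, which must
 *		be split before a single 4k page in it can change
 */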
static struct page *
split_large_page(unsigned long address, pgprot_t ref_prot)
{
        unsigned long addr;
        struct page *base;
        pte_t *pbase;
        int i;

        base = alloc_pages(GFP_KERNEL, 0);
        if (!base)
                return NULL;

        /*
         * Fill the new page table with 4k entries that together map
         * exactly the physical range the large page mapped:
         */
        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT, ref_prot);

        return base;
}
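/*
 * Worked example (editor's illustration): with 2M large pages,
 * LARGE_PAGE_MASK clears the low 21 address bits. For a physical address
 * of 0x200123000 the loop starts at addr = 0x200000000 and installs
 * PTRS_PER_PTE = 512 ptes for the frames pfn = 0x200000, 0x200001, ...
 * (pfn = addr >> PAGE_SHIFT), i.e. 512 * 4k = 2M worth of 4k mappings.
 */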
static int
__change_page_attr(unsigned long address, struct page *page, pgprot_t prot)
{
        struct page *kpte_page;
        pte_t *kpte;
        pgprot_t ref_prot2, oldprot;
        int level;

        kpte = lookup_address(address, &level);
        if (!kpte)
                return 0;

        kpte_page = virt_to_page(kpte);
        oldprot = pte_pgprot(*kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));
        prot = canon_pgprot(prot);

        if (level == 4) {
                /* Already a 4k mapping - update the pte in place: */
                set_pte_atomic(kpte, mk_pte(page, prot));
        } else {
                /*
                 * split_large_page will take the reference for this
                 * change_page_attr on the split page.
                 */
                struct page *split;

                ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
                split = split_large_page(address, ref_prot2);
                if (!split)
                        return -ENOMEM;
                /* Keep the pmd entry executable - NX is decided by the 4k ptes: */
                pgprot_val(ref_prot2) &= ~_PAGE_NX;
                set_pte_atomic(kpte, mk_pte(split, ref_prot2));
        }

        return 0;
}
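/*
 * Worked effect (editor's illustration): when "address" lies in a large
 * kernel mapping, the else-branch above turns
 *
 *	pmd: one large-page entry (old protections, PSE set)
 * into
 *	pmd: pointer to a new page table, NX cleared at this level
 *	     PTRS_PER_PTE (512 on 64-bit) 4k ptes carrying the old
 *	     large-page protections
 *
 * so that attributes can subsequently be changed with 4k granularity.
 */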
/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0, kernel_map = 0, i;

        /*
         * Addresses in the high kernel text mapping are translated to
         * their direct-mapping alias and flagged for the PRESENT-bit
         * handling below:
         */
        if (address >= __START_KERNEL_map &&
            address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
                address = (unsigned long)__va(__pa(address));
                kernel_map = 1;
        }

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                /*
                 * When we came through the kernel text mapping, only touch
                 * the direct mapping if the new protection is present -
                 * otherwise lowmem would get unmapped:
                 */
                if (!kernel_map || pte_present(pfn_pte(0, prot))) {
                        err = __change_page_attr(address, pfn_to_page(pfn), prot);
                        if (err)
                                break;
                }
                /*
                 * Handle the kernel mapping too, which aliases part of
                 * the lowmem:
                 */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2;

                        addr2 = __START_KERNEL_map + __pa(address);
                        /* Make sure the kernel mappings stay executable: */
                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                        err = __change_page_attr(addr2, pfn_to_page(pfn), prot2);
                }
        }
        up_write(&init_mm.mmap_sem);

        return err;
}
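/*
 * Usage sketch (editor's illustration, not in the original file): for a
 * physical range without a struct page (pfn_valid() is false, e.g. a
 * memory hole used for MMIO), a caller passes the direct-mapping virtual
 * address instead of a struct page:
 *
 *	change_page_attr_addr(addr, npages, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 */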
/**
 * change_page_attr - Change page table attributes in the linear mapping.
 * @page: First page to change
 * @numpages: Number of pages to change
 * @prot: New protection/caching type (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * For MMIO areas without mem_map use change_page_attr_addr() instead.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);
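/*
 * Typical caller (editor's illustration): the AGP drivers map RAM pages
 * uncached for the GART while the linear map would otherwise keep them
 * write-back, roughly:
 *
 *	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 */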
static void flush_kernel_map(void *arg)
{
        /*
         * Flush all TLB entries to work around an erratum in early
         * Athlons regarding large page flushing:
         */
        __flush_tlb_all();

        /* Write back and invalidate the caches as well - the attribute
         * change may have altered the caching policy: */
        if (boot_cpu_data.x86_model >= 4)
                wbinvd();
}
void global_flush_tlb(void)
{
        /* on_each_cpu() waits for completion and must not run with IRQs off: */
        BUG_ON(irqs_disabled());

        on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);
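/*
 * Example (editor's addition, not part of the original file): the intended
 * calling sequence for the API above. The function is illustrative only;
 * "my_page" would come from a real allocation in a driver:
 */
static int __maybe_unused example_make_page_uncached(struct page *my_page)
{
        int err;

        /* Switch the linear-map entry for this page to uncached: */
        err = change_page_attr(my_page, 1, PAGE_KERNEL_NOCACHE);
        if (err)
                return err;

        /* Nothing takes effect until the TLBs and caches are flushed: */
        global_flush_tlb();

        return 0;
}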