 * Thanks to Ben LaHaise for precious feedback.
 */
-#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
	return base;
}
-
-static void flush_kernel_map(void *address)
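+/* Flush one page's worth of data from the CPU caches, one cache line
+   at a time. */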
+static void cache_flush_page(void *adr)
{
-	if (0 && address && cpu_has_clflush) {
-		/* is this worth it? */
-		int i;
-		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-			asm volatile("clflush (%0)" :: "r" (address + i));
-	} else
-		asm volatile("wbinvd":::"memory");
-	if (address)
-		__flush_tlb_one(address);
-	else
-		__flush_tlb_all();
+	int i;
+	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+		asm volatile("clflush (%0)" :: "r" (adr + i));
}
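+
+/* Runs on every CPU: flush the cache (per page via clflush, or all at
+   once via WBINVD) and the TLB entry of each page queued for freeing. */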
+static void flush_kernel_map(void *arg)
+{
+	struct list_head *l = (struct list_head *)arg;
+	struct page *pg;
+
+	/* When clflush is available always use it because it is
+	   much cheaper than WBINVD */
+	if (!cpu_has_clflush)
+		asm volatile("wbinvd" ::: "memory");
+	list_for_each_entry(pg, l, lru) {
+		void *adr = page_address(pg);
+		if (cpu_has_clflush)
+			cache_flush_page(adr);
+		__flush_tlb_one(adr);
+	}
+}
-static inline void flush_map(unsigned long address)
+static inline void flush_map(struct list_head *l)
{
-	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
+	on_each_cpu(flush_kernel_map, l, 1, 1);
}
-static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
+static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
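+
+/* Pages queued here are only freed in global_flush_tlb(), after every
+   CPU has flushed its TLB and caches. */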
static inline void save_page(struct page *fpage)
{
-	fpage->lru.next = (struct list_head *)deferred_pages;
-	deferred_pages = fpage;
+	list_add(&fpage->lru, &deferred_pages);
}
/*
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;
+	unsigned long pfn;
	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
-	pgprot_val(ref_prot) |= _PAGE_PSE;
-	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
+	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
+	large_pte = pfn_pte(pfn, ref_prot);
+	large_pte = pte_mkhuge(large_pte);
	set_pte((pte_t *)pmd, large_pte);
}
{
	pte_t *kpte;
	struct page *kpte_page;
-	unsigned kpte_flags;
	pgprot_t ref_prot2;
	kpte = lookup_address(address);
	if (!kpte) return 0;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
-	kpte_flags = pte_val(*kpte);
	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
-		if ((kpte_flags & _PAGE_PSE) == 0) {
+		if (!pte_huge(*kpte)) {
			set_pte(kpte, pfn_pte(pfn, prot));
		} else {
			/*
			 * split_large_page will take the reference for this
			 * change_page_attr on the split page.
			 */
-
			struct page *split;
-			ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
-
+			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
			split = split_large_page(address, prot, ref_prot2);
			if (!split)
				return -ENOMEM;
-			set_pte(kpte,mk_pte(split, ref_prot2));
+			set_pte(kpte, mk_pte(split, ref_prot2));
			kpte_page = split;
-		}
+		}
		page_private(kpte_page)++;
-	} else if ((kpte_flags & _PAGE_PSE) == 0) {
+	} else if (!pte_huge(*kpte)) {
		set_pte(kpte, pfn_pte(pfn, ref_prot));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
		 * lowmem */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
-			pgprot_t prot2 = prot;
+			pgprot_t prot2;
			addr2 = __START_KERNEL_map + __pa(address);
-			pgprot_val(prot2) &= ~_PAGE_NX;
-			err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
+			/* Make sure the kernel mappings stay executable */
+			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
+			err = __change_page_attr(addr2, pfn, prot2,
+						 PAGE_KERNEL_EXEC);
		}
	}
	up_write(&init_mm.mmap_sem);
void global_flush_tlb(void)
{
-	struct page *dpage;
+	struct page *pg, *next;
+	struct list_head l;
	down_read(&init_mm.mmap_sem);
-	dpage = xchg(&deferred_pages, NULL);
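+	/* Take over the deferred list while holding the semaphore, so the
+	   pages can be flushed and freed after dropping it. */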
+	list_replace_init(&deferred_pages, &l);
	up_read(&init_mm.mmap_sem);
-	flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
-	while (dpage) {
-		struct page *tmp = dpage;
-		dpage = (struct page *)dpage->lru.next;
-		ClearPagePrivate(tmp);
-		__free_page(tmp);
+	flush_map(&l);
+
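+	/* No CPU can reach the old mappings any more; free the deferred
+	   pages. */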
+	list_for_each_entry_safe(pg, next, &l, lru) {
+		ClearPagePrivate(pg);
+		__free_page(pg);
	}
}