Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index ef1f6cd3ea66d7cacce1c70ee2d22106bfe82e3f..2f9e9afcb9f4270d6e5f5a496f33999d6ce8298b 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
+#include <linux/nmi.h>
 #include <linux/swap.h>
 #include <linux/smp.h>
 #include <linux/highmem.h>
@@ -39,6 +40,8 @@ void show_mem(void)
        for_each_online_pgdat(pgdat) {
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
+                       if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
+                               touch_nmi_watchdog();
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
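
The two added lines above keep the NMI watchdog from firing while show_mem() walks every page in a node: the watchdog is poked once per MAX_ORDER_NR_PAGES pages rather than on every iteration. A minimal userspace sketch of the same pattern; touch_watchdog() and CHUNK are illustrative stand-ins for the kernel's touch_nmi_watchdog() and MAX_ORDER_NR_PAGES, not real APIs:

#include <stdio.h>

#define CHUNK 1024	/* stand-in for MAX_ORDER_NR_PAGES */

/* stand-in for touch_nmi_watchdog(); in the kernel this resets the
 * watchdog timestamp so a long but healthy loop is not flagged as a hang */
static void touch_watchdog(void)
{
}

static void walk_pages(unsigned long nr_pages)
{
	unsigned long i, total = 0;

	for (i = 0; i < nr_pages; i++) {
		/* poke the watchdog once per CHUNK iterations, keeping
		 * the overhead negligible on the common path */
		if (i % CHUNK == 0)
			touch_watchdog();
		total++;	/* stands in for the per-page accounting */
	}
	printf("scanned %lu pages\n", total);
}

int main(void)
{
	walk_pages(1UL << 20);
	return 0;
}
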
@@ -97,8 +100,7 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
        }
        pte = pte_offset_kernel(pmd, vaddr);
        if (pgprot_val(flags))
-               /* <pfn,flags> stored as-is, to permit clearing entries */
-               set_pte(pte, pfn_pte(pfn, flags));
+               set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
        else
                pte_clear(&init_mm, vaddr, pte);
 
@@ -181,7 +183,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 }
 
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        struct page *pte;
 
@@ -190,14 +192,11 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 #else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 #endif
+       if (pte)
+               pgtable_page_ctor(pte);
        return pte;
 }
 
-void pmd_ctor(struct kmem_cache *cache, void *pmd)
-{
-       memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-}
-
 /*
  * List of all pgd's needed for non-PAE so it can invalidate entries
  * in both cached and uncached pgd's; not needed for PAE since the
@@ -208,73 +207,53 @@ void pmd_ctor(struct kmem_cache *cache, void *pmd)
  * vmalloc faults work because attached pagetables are never freed.
  * -- wli
  */
-DEFINE_SPINLOCK(pgd_lock);
-struct page *pgd_list;
-
 static inline void pgd_list_add(pgd_t *pgd)
 {
        struct page *page = virt_to_page(pgd);
-       page->index = (unsigned long)pgd_list;
-       if (pgd_list)
-               set_page_private(pgd_list, (unsigned long)&page->index);
-       pgd_list = page;
-       set_page_private(page, (unsigned long)&pgd_list);
+
+       list_add(&page->lru, &pgd_list);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
-       struct page *next, **pprev, *page = virt_to_page(pgd);
-       next = (struct page *)page->index;
-       pprev = (struct page **)page_private(page);
-       *pprev = next;
-       if (next)
-               set_page_private(next, (unsigned long)pprev);
-}
+       struct page *page = virt_to_page(pgd);
 
+       list_del(&page->lru);
+}
 
+#define UNSHARED_PTRS_PER_PGD                          \
+       (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
 
-#if (PTRS_PER_PMD == 1)
-/* Non-PAE pgd constructor */
-static void pgd_ctor(void *pgd)
+static void pgd_ctor(void *p)
 {
+       pgd_t *pgd = p;
        unsigned long flags;
 
-       /* !PAE, no pagetable sharing */
+       /* Clear usermode parts of PGD */
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
 
        spin_lock_irqsave(&pgd_lock, flags);
 
-       /* must happen under lock */
-       clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
-                       swapper_pg_dir + USER_PTRS_PER_PGD,
-                       KERNEL_PGD_PTRS);
-       paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
-                               __pa(swapper_pg_dir) >> PAGE_SHIFT,
-                               USER_PTRS_PER_PGD,
-                               KERNEL_PGD_PTRS);
-       pgd_list_add(pgd);
-       spin_unlock_irqrestore(&pgd_lock, flags);
-}
-#else  /* PTRS_PER_PMD > 1 */
-/* PAE pgd constructor */
-static void pgd_ctor(void *pgd)
-{
-       /* PAE, kernel PMD may be shared */
-
-       if (SHARED_KERNEL_PMD) {
-               clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
+       /* If the pgd points to a shared pagetable level (either the
+          ptes in non-PAE, or shared PMD in PAE), then just copy the
+          references from swapper_pg_dir. */
+       if (PAGETABLE_LEVELS == 2 ||
+           (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
+               clone_pgd_range(pgd + USER_PTRS_PER_PGD,
                                swapper_pg_dir + USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
-       } else {
-               unsigned long flags;
+               paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
+                                       __pa(swapper_pg_dir) >> PAGE_SHIFT,
+                                       USER_PTRS_PER_PGD,
+                                       KERNEL_PGD_PTRS);
+       }
 
-               memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-               spin_lock_irqsave(&pgd_lock, flags);
+       /* list required to sync kernel mapping updates */
+       if (!SHARED_KERNEL_PMD)
                pgd_list_add(pgd);
-               spin_unlock_irqrestore(&pgd_lock, flags);
-       }
+
+       spin_unlock_irqrestore(&pgd_lock, flags);
 }
-#endif /* PTRS_PER_PMD */
 
 static void pgd_dtor(void *pgd)
 {
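
The pgd_list rework above replaces a singly-linked list hand-threaded through page->index and page->private with the doubly-linked list node already embedded in struct page (page->lru), so add and delete each collapse to one standard call. A self-contained userspace sketch of that intrusive-list pattern; struct list_head and the helpers here mimic, but are not, the kernel's list API:

#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

/* stand-in for struct page, with the embedded node (cf. page->lru) */
struct page {
	int id;
	struct list_head lru;
};

static struct list_head pgd_list = LIST_HEAD_INIT(pgd_list);

int main(void)
{
	struct page a = { .id = 1 }, b = { .id = 2 };
	struct page *head;

	/* pgd_list_add() collapses to one call ... */
	list_add(&a.lru, &pgd_list);
	list_add(&b.lru, &pgd_list);

	/* ... and pgd_list_del() to another, with no pointer juggling
	 * through page->index/page->private */
	list_del(&a.lru);

	/* recover the containing struct from the embedded node */
	head = (struct page *)((char *)pgd_list.next -
			       offsetof(struct page, lru));
	printf("list head is page %d\n", head->id);
	return 0;
}
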
@@ -283,91 +262,122 @@ static void pgd_dtor(void *pgd)
        if (SHARED_KERNEL_PMD)
                return;
 
-       paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
 }
 
-#define UNSHARED_PTRS_PER_PGD                          \
-       (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
+#ifdef CONFIG_X86_PAE
+/*
+ * Mop up any pmd pages which may still be attached to the pgd.
+ * Normally they will be freed by munmap/exit_mmap, but any pmd we
+ * preallocate which never got a corresponding vma will need to be
+ * freed manually.
+ */
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
+{
+       int i;
+
+       for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
+               pgd_t pgd = pgdp[i];
 
-/* If we allocate a pmd for part of the kernel address space, then
-   make sure its initialized with the appropriate kernel mappings.
-   Otherwise use a cached zeroed pmd.  */
-static pmd_t *pmd_cache_alloc(int idx)
+               if (pgd_val(pgd) != 0) {
+                       pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+
+                       pgdp[i] = native_make_pgd(0);
+
+                       paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
+                       pmd_free(mm, pmd);
+               }
+       }
+}
+
+/*
+ * In PAE mode, we need to do a cr3 reload (=tlb flush) when
+ * updating the top-level pagetable entries to guarantee the
+ * processor notices the update.  Since this is expensive, and
+ * all 4 top-level entries are used almost immediately in a
+ * new process's life, we just pre-populate them here.
+ *
+ * Also, if we're in a paravirt environment where the kernel pmd is
+ * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
+ * and initialize the kernel pmds here.
+ */
+static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
 {
-       pmd_t *pmd;
+       pud_t *pud;
+       unsigned long addr;
+       int i;
 
-       if (idx >= USER_PTRS_PER_PGD) {
-               pmd = (pmd_t *)__get_free_page(GFP_KERNEL);
+       pud = pud_offset(pgd, 0);
+       for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
+            i++, pud++, addr += PUD_SIZE) {
+               pmd_t *pmd = pmd_alloc_one(mm, addr);
+
+               if (!pmd) {
+                       pgd_mop_up_pmds(mm, pgd);
+                       return 0;
+               }
 
-               if (pmd)
-                       memcpy(pmd,
-                              (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
+               if (i >= USER_PTRS_PER_PGD)
+                       memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);
-       } else
-               pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
 
-       return pmd;
+               pud_populate(mm, pud, pmd);
+       }
+
+       return 1;
+}
+#else  /* !CONFIG_X86_PAE */
+/* No need to prepopulate any pagetable entries in non-PAE modes. */
+static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
+{
+       return 1;
 }
 
-static void pmd_cache_free(pmd_t *pmd, int idx)
+static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
 {
-       if (idx >= USER_PTRS_PER_PGD)
-               free_page((unsigned long)pmd);
-       else
-               kmem_cache_free(pmd_cache, pmd);
 }
+#endif /* CONFIG_X86_PAE */
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       int i;
-       pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
-
-       if (PTRS_PER_PMD == 1 || !pgd)
-               return pgd;
+       pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 
-       for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
-               pmd_t *pmd = pmd_cache_alloc(i);
+       /* so that alloc_pd can use it */
+       mm->pgd = pgd;
+       if (pgd)
+               pgd_ctor(pgd);
 
-               if (!pmd)
-                       goto out_oom;
-
-               paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
-               set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
+       if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
+               pgd_dtor(pgd);
+               free_page((unsigned long)pgd);
+               pgd = NULL;
        }
-       return pgd;
 
-out_oom:
-       for (i--; i >= 0; i--) {
-               pgd_t pgdent = pgd[i];
-               void* pmd = (void *)__va(pgd_val(pgdent)-1);
-               paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
-               pmd_cache_free(pmd, i);
-       }
-       quicklist_free(0, pgd_dtor, pgd);
-       return NULL;
+       return pgd;
 }
 
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-       int i;
+       pgd_mop_up_pmds(mm, pgd);
+       pgd_dtor(pgd);
+       free_page((unsigned long)pgd);
+}
 
-       /* in the PAE case user pgd entries are overwritten before usage */
-       if (PTRS_PER_PMD > 1)
-               for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
-                       pgd_t pgdent = pgd[i];
-                       void* pmd = (void *)__va(pgd_val(pgdent)-1);
-                       paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
-                       pmd_cache_free(pmd, i);
-               }
-       /* in the non-PAE case, free_pgtables() clears user pgd entries */
-       quicklist_free(0, pgd_dtor, pgd);
+void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
+{
+       pgtable_page_dtor(pte);
+       paravirt_release_pt(page_to_pfn(pte));
+       tlb_remove_page(tlb, pte);
 }
 
-void check_pgt_cache(void)
+#ifdef CONFIG_X86_PAE
+
+void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
 {
-       quicklist_trim(0, pgd_dtor, 25, 16);
+       paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
+       tlb_remove_page(tlb, virt_to_page(pmd));
 }
 
+#endif
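
The pgd_prepopulate_pmd()/pgd_mop_up_pmds() pair above follows an allocate-up-front, mop-up-on-failure shape: all top-level slots are populated eagerly so later updates need no cr3 reload, and a partially built table can be torn down from the failure path as well as from pgd_free(). A minimal sketch of that shape under stated assumptions: a flat table stands in for the pgd, and calloc()/free() stand in for pmd_alloc_one()/pmd_free():

#include <stdlib.h>

#define NR_ENTRIES 4	/* cf. the four top-level entries of a PAE pgd */

struct table {
	void *entry[NR_ENTRIES];
};

/* free whatever is still attached; harmless on a partially built table,
 * since free(NULL) is a no-op (cf. pgd_mop_up_pmds checking pgd_val) */
static void mop_up(struct table *t)
{
	int i;

	for (i = 0; i < NR_ENTRIES; i++) {
		free(t->entry[i]);
		t->entry[i] = NULL;
	}
}

/* returns 1 on success; on failure everything already attached is freed,
 * mirroring the pgd_mop_up_pmds() call inside pgd_prepopulate_pmd() */
static int prepopulate(struct table *t)
{
	int i;

	for (i = 0; i < NR_ENTRIES; i++) {
		t->entry[i] = calloc(1, 4096);
		if (!t->entry[i]) {
			mop_up(t);
			return 0;
		}
	}
	return 1;
}

int main(void)
{
	struct table t = { { NULL } };

	if (!prepopulate(&t))
		return 1;
	/* ... table in use ... */
	mop_up(&t);	/* cf. pgd_mop_up_pmds() called from pgd_free() */
	return 0;
}
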