Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux...

diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index ef8d30a185a97574657ee66d31134b9feec76363..38769f5862bc4f0d414287bfdec754fdd431d2c7 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -9,7 +9,6 @@
  *
  *  Page table sludge for ARM v3 and v4 processor architectures.
  */
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/init.h>
@@ -227,7 +226,7 @@ void free_pgd_slow(pgd_t *pgd)
 
        pte = pmd_page(*pmd);
        pmd_clear(pmd);
-       dec_page_state(nr_page_table_pages);
+       dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
        pte_lock_deinit(pte);
        pte_free(pte);
        pmd_free(pmd);
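
This hunk is part of the zoned VM counter conversion: the single global
nr_page_table_pages counter becomes a per-zone NR_PAGETABLE counter, keyed
off the page that backs the table, so page-table usage can be reported per
zone. A minimal userspace sketch of the difference, with toy zone names and
a toy struct page standing in for the real kernel types:

/*
 * Standalone model (not kernel code) of the counter change above:
 * dec_page_state() decremented one global counter, while
 * dec_zone_page_state() decrements the counter of whichever zone
 * holds the given page.
 */
#include <stdio.h>

enum zone_id { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, NR_ZONES };

struct page {
	enum zone_id zone;		/* toy stand-in for page_zone() */
};

static long nr_page_table_pages;	/* old: single global counter */
static long zone_pagetable[NR_ZONES];	/* new: one counter per zone  */

static void dec_page_state_model(void)
{
	nr_page_table_pages--;
}

static void dec_zone_page_state_model(struct page *page)
{
	zone_pagetable[page->zone]--;
}

int main(void)
{
	struct page pgd_page = { .zone = ZONE_NORMAL };

	dec_page_state_model();			/* pre-patch behaviour  */
	dec_zone_page_state_model(&pgd_page);	/* post-patch behaviour */

	printf("global=%ld, ZONE_NORMAL=%ld\n",
	       nr_page_table_pages, zone_pagetable[ZONE_NORMAL]);
	return 0;
}
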
@@ -303,16 +302,16 @@ static struct mem_types mem_types[] __initdata = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
-               .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
-               .prot_sect = PMD_TYPE_SECT,
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_LOW_VECTORS] = {
@@ -328,25 +327,25 @@ static struct mem_types mem_types[] __initdata = {
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
-               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
-               .prot_sect = PMD_TYPE_SECT,
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
-               .prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
                                PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
                                PMD_SECT_TEX(1),
                .domain    = DOMAIN_IO,
        },
        [MT_NONSHARED_DEVICE] = {
                .prot_l1   = PMD_TYPE_TABLE,
-               .prot_sect = PMD_TYPE_SECT | PMD_SECT_NONSHARED_DEV |
+               .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
                                PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_IO,
        }
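
These table hunks invert the old PMD_BIT4 policy: the bit is now present in
every static section descriptor and stripped at runtime on CPUs that must
not have it (see the XScale fixup in the next hunk), instead of being OR-ed
in on pre-v6 CPUs. A compilable sketch of the new flow, using bit positions
from ARM's two-level section descriptor format; the is_xscale flag is an
illustrative stand-in for cpu_is_xscale():

/* Standalone sketch, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define PMD_TYPE_SECT		(2u << 0)
#define PMD_BIT4		(1u << 4)
#define PMD_SECT_AP_WRITE	(1u << 10)

int main(void)
{
	/* after this patch the static entry carries BIT4 by default */
	uint32_t prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE;
	int is_xscale = 1;		/* pretend we run on an XScale core */

	if (is_xscale)			/* XScale: bit 4 must stay clear */
		prot_sect &= ~PMD_BIT4;

	printf("prot_sect = %#x (bit4 %s)\n", prot_sect,
	       (prot_sect & PMD_BIT4) ? "set" : "clear");
	return 0;
}
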
@@ -376,18 +375,36 @@ void __init build_mem_type_table(void)
                ecc_mask = 0;
        }
 
-       if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
-               for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
+       /*
+        * XScale must not have PMD bit 4 set for section mappings.
+        */
+       if (cpu_is_xscale())
+               for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+                       mem_types[i].prot_sect &= ~PMD_BIT4;
+
+       /*
+        * On ARMv5 and lower (excluding XScale), bit 4 must be set
+        * for page tables.
+        */
+       if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
+               for (i = 0; i < ARRAY_SIZE(mem_types); i++)
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
-                       if (mem_types[i].prot_sect)
-                               mem_types[i].prot_sect |= PMD_BIT4;
-               }
-       }
 
        cp = &cache_policies[cachepolicy];
        kern_pgprot = user_pgprot = cp->pte;
 
+       /*
+        * Enable CPU-specific coherency if supported.
+        * (Only available on XSC3 at the moment.)
+        */
+       if (arch_is_coherent()) {
+               if (cpu_is_xsc3()) {
+                       mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+                       mem_types[MT_MEMORY].prot_pte |= L_PTE_COHERENT;
+               }
+       }
+
        /*
         * ARMv6 and above have extended page tables.
         */
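
The new arch_is_coherent() block marks normal memory shared on XSC3 so the
hardware keeps it coherent: PMD_SECT_S for section mappings, L_PTE_COHERENT
for PTEs. A small sketch of the section-descriptor side, assuming the
v6-style shared bit at bit 16; the Linux-internal L_PTE_COHERENT flag is
left out since its encoding is not shown here:

#include <stdint.h>
#include <stdio.h>

#define PMD_TYPE_SECT		(2u << 0)
#define PMD_SECT_AP_WRITE	(1u << 10)
#define PMD_SECT_S		(1u << 16)	/* shared: hw-coherent */

/* toy version of the MT_MEMORY fixup in the hunk above */
static uint32_t fixup_coherent(uint32_t prot_sect, int coherent_xsc3)
{
	if (coherent_xsc3)
		prot_sect |= PMD_SECT_S;
	return prot_sect;
}

int main(void)
{
	uint32_t mem = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;

	mem = fixup_coherent(mem, 1);	/* pretend arch_is_coherent() */
	printf("MT_MEMORY prot_sect = %#x\n", mem);
	return 0;
}
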
@@ -396,8 +413,8 @@ void __init build_mem_type_table(void)
                 * bit 4 becomes XN which we must clear for the
                 * kernel memory mapping.
                 */
-               mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
-               mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;
+               mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
+               mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
 
                /*
                 * Mark cache clean areas and XIP ROM read only
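
On ARMv6 the descriptor bit previously known as PMD_BIT4 is reused as XN
(execute never), which is why the mask switches names here even though the
cleared bit position is unchanged; kernel RAM and ROM must stay executable.
A sketch, assuming the usual bit-4 encoding for both names:

#include <assert.h>
#include <stdint.h>

#define PMD_BIT4	(1u << 4)	/* pre-v6 name for bit 4   */
#define PMD_SECT_XN	(1u << 4)	/* v6+ name: execute never */

int main(void)
{
	uint32_t prot_sect = 0xffffffffu;	/* all bits set, for demo */

	assert(PMD_BIT4 == PMD_SECT_XN);	/* same bit, new meaning */
	prot_sect &= ~PMD_SECT_XN;		/* kernel text may execute */
	return prot_sect & PMD_SECT_XN;		/* 0 == success */
}
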
@@ -557,7 +574,8 @@ void __init create_mapping(struct map_desc *md)
         *      supersections are only allocated for domain 0 regardless
         *      of the actual domain assignments in use.
         */
-       if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
+       if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
+               && domain == 0) {
                /*
                 * Align to supersection boundary if !high pages.
                 * High pages have already been checked for proper
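
This hunk lets XSC3 use 16MiB supersections even though it does not report
as ARMv6. A sketch of the eligibility test as extended here, with plain
flag parameters standing in for cpu_architecture() and cpu_is_xsc3():

#include <stdint.h>

#define SUPERSECTION_SIZE	(1u << 24)	/* 16 MiB */
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE - 1))

/* toy eligibility check mirroring the condition in the hunk above */
static int can_use_supersection(int v6_or_later, int is_xsc3,
				unsigned int domain, uint32_t virt,
				uint32_t phys, uint32_t length)
{
	if (!(v6_or_later || is_xsc3) || domain != 0)
		return 0;
	/* addresses and length must all sit on 16 MiB boundaries */
	return !(virt & ~SUPERSECTION_MASK) &&
	       !(phys & ~SUPERSECTION_MASK) &&
	       !(length & ~SUPERSECTION_MASK);
}

int main(void)
{
	/* aligned 16 MiB mapping on XSC3, domain 0: eligible */
	return !can_use_supersection(0, 1, 0, 0x01000000, 0x10000000,
				     SUPERSECTION_SIZE);
}
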
@@ -619,7 +637,7 @@ void setup_mm_for_reboot(char mode)
                pgd = init_mm.pgd;
 
        base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
-       if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ)
+       if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
                base_pmdval |= PMD_BIT4;
 
        for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
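
setup_mm_for_reboot() replaces the kernel map with a flat 1:1 section map
so the reboot code can keep running with the MMU on; the change above keeps
PMD_BIT4 out of that map on XScale. A sketch of how the identity entries
are formed, assuming the 1MiB section granularity of the ARM two-level
format and toy CPU flags in place of the real cpu_architecture() and
cpu_is_xscale() tests:

#include <stdint.h>
#include <stdio.h>

#define PMD_TYPE_SECT		(2u << 0)
#define PMD_BIT4		(1u << 4)
#define PMD_SECT_AP_WRITE	(1u << 10)
#define PMD_SECT_AP_READ	(1u << 11)
#define SECTION_SHIFT		20		/* 1 MiB per section */

int main(void)
{
	uint32_t base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
			       PMD_TYPE_SECT;
	int pre_v6 = 1, is_xscale = 0;	/* pretend: ARMv5, not XScale */
	uint32_t pmd[4];		/* first few of the 1:1 entries */
	unsigned int i;

	if (pre_v6 && !is_xscale)	/* same test as the hunk above */
		base_pmdval |= PMD_BIT4;

	/* identity map: section i covers physical address i << 20 */
	for (i = 0; i < 4; i++)
		pmd[i] = (i << SECTION_SHIFT) | base_pmdval;

	for (i = 0; i < 4; i++)
		printf("pmd[%u] = %#010x\n", i, pmd[i]);
	return 0;
}
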