x86: ioremap, remove WARN_ON()
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index c004d94608fd4a21035684c12cf0b6af5d3d46ba..8fe576baa14870c225f00606c84967d2941defe8 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -42,6 +42,22 @@ int page_is_ram(unsigned long pagenr)
        unsigned long addr, end;
        int i;
 
+       /*
+        * A special case is the first 4KB of memory:
+        * this is a BIOS-owned area, not kernel RAM, but generally
+        * not listed as such in the E820 table.
+        */
+       if (pagenr == 0)
+               return 0;
+
+       /*
+        * Second special case: Some BIOSen report the PC BIOS
+        * area (640 KB - 1 MB) as RAM even though it is not.
+        */
+       if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
+                   pagenr < (BIOS_END >> PAGE_SHIFT))
+               return 0;
+
        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
@@ -51,14 +67,6 @@ int page_is_ram(unsigned long pagenr)
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
 
-               /*
-                * Sanity check: Some BIOSen report areas as RAM that
-                * are not. Notably the 640->1Mb area, which is the
-                * PCI BIOS area.
-                */
-               if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
-                   end < (BIOS_END >> PAGE_SHIFT))
-                       continue;
 
                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
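
For orientation, a small standalone sketch of the pfn arithmetic behind the BIOS-window check added above. The constants are assumptions spelled out for illustration: the conventional x86 values BIOS_BEGIN == 0xa0000 and BIOS_END == 0x100000, with 4 KB pages (PAGE_SHIFT == 12).

/*
 * Illustrative version of the BIOS-window test above, with the usual x86
 * constants written out (assumption: BIOS_BEGIN == 0xa0000,
 * BIOS_END == 0x100000, PAGE_SHIFT == 12).  0xa0000 >> 12 == 0xa0 and
 * 0x100000 >> 12 == 0x100, so pfns 0xa0..0xff -- the 640 KB..1 MB
 * window -- are reported as "not RAM".
 */
static int example_is_bios_window(unsigned long pagenr)
{
	return pagenr >= (0xa0000UL >> 12) && pagenr < (0x100000UL >> 12);
}
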
@@ -70,25 +78,12 @@ int page_is_ram(unsigned long pagenr)
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
  */
-static int ioremap_change_attr(unsigned long paddr, unsigned long size,
+static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               enum ioremap_mode mode)
 {
-       unsigned long vaddr = (unsigned long)__va(paddr);
        unsigned long nrpages = size >> PAGE_SHIFT;
-       unsigned int level;
        int err;
 
-       /* No change for pages after the last mapping */
-       if ((paddr + size - 1) >= (max_pfn_mapped << PAGE_SHIFT))
-               return 0;
-
-       /*
-        * If there is no identity map for this address,
-        * change_page_attr_addr is unnecessary
-        */
-       if (!lookup_address(vaddr, &level))
-               return 0;
-
        switch (mode) {
        case IOR_MODE_UNCACHED:
        default:
@@ -114,9 +109,8 @@ static int ioremap_change_attr(unsigned long paddr, unsigned long size,
 static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
                               enum ioremap_mode mode)
 {
-       void __iomem *addr;
+       unsigned long pfn, offset, last_addr, vaddr;
        struct vm_struct *area;
-       unsigned long offset, last_addr;
        pgprot_t prot;
 
        /* Don't allow wraparound or zero size */
@@ -133,9 +127,10 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
-       for (offset = phys_addr >> PAGE_SHIFT; offset < max_pfn_mapped &&
-            (offset << PAGE_SHIFT) < last_addr; offset++) {
-               if (page_is_ram(offset))
+       for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
+            (pfn << PAGE_SHIFT) < last_addr; pfn++) {
+               if (page_is_ram(pfn) && pfn_valid(pfn) &&
+                   !PageReserved(pfn_to_page(pfn)))
                        return NULL;
        }
 
@@ -163,19 +158,18 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
-       addr = (void __iomem *) area->addr;
-       if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
-                              phys_addr, prot)) {
-               remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
+       vaddr = (unsigned long) area->addr;
+       if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
+               free_vm_area(area);
                return NULL;
        }
 
-       if (ioremap_change_attr(phys_addr, size, mode) < 0) {
-               vunmap(addr);
+       if (ioremap_change_attr(vaddr, size, mode) < 0) {
+               vunmap(area->addr);
                return NULL;
        }
 
-       return (void __iomem *) (offset + (char __iomem *)addr);
+       return (void __iomem *) (vaddr + offset);
 }
 
 /**
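
For context, a minimal sketch of how a driver typically consumes this interface; the MMIO base address, window size, and register offsets below are hypothetical, not taken from the patch.

/*
 * Hypothetical driver usage of the interface above.  The BAR address,
 * window size and register offsets are made up for illustration; a real
 * driver would take them from its PCI/platform resources.
 */
#include <linux/errno.h>
#include <linux/io.h>

static int example_map_device(void)
{
	void __iomem *regs;

	/*
	 * __ioremap() refuses pages that page_is_ram() reports as usable
	 * RAM, so this only succeeds for genuine device memory.
	 */
	regs = ioremap_nocache(0xfebf0000UL, 0x1000);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x04);	/* hypothetical control register */
	(void)readl(regs + 0x00);	/* hypothetical status register */

	iounmap(regs);
	return 0;
}
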
@@ -254,9 +248,6 @@ void iounmap(volatile void __iomem *addr)
                return;
        }
 
-       /* Reset the direct mapping. Can block */
-       ioremap_change_attr(p->phys_addr, p->size, IOR_MODE_CACHED);
-
        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
@@ -277,41 +268,48 @@ static int __init early_ioremap_debug_setup(char *str)
 early_param("early_ioremap_debug", early_ioremap_debug_setup);
 
 static __initdata int after_paging_init;
-static __initdata unsigned long bm_pte[1024]
+static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                                __attribute__((aligned(PAGE_SIZE)));
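
A quick worked check on the new array size (an illustration, not part of the patch): with 4 KB pages, PAGE_SIZE/sizeof(pte_t) is 1024 entries for a 4-byte non-PAE pte_t, matching the old bm_pte[1024], and 512 entries for an 8-byte PAE pte_t; either way the array is exactly one page-table page.

/*
 * Hypothetical compile-time check of the arithmetic above (not in the
 * patch): sizeof(bm_pte) works out to PAGE_SIZE for both the 4-byte
 * non-PAE and the 8-byte PAE pte_t.
 */
static void __init example_bm_pte_size_check(void)
{
	BUILD_BUG_ON(sizeof(bm_pte) != PAGE_SIZE);
}
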
 
-static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
-       return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
+       /* Don't assume we're using swapper_pg_dir at this point */
+       pgd_t *base = __va(read_cr3());
+       pgd_t *pgd = &base[pgd_index(addr)];
+       pud_t *pud = pud_offset(pgd, addr);
+       pmd_t *pmd = pmd_offset(pud, addr);
+
+       return pmd;
 }
 
-static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
+static inline pte_t * __init early_ioremap_pte(unsigned long addr)
 {
-       return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
+       return &bm_pte[pte_index(addr)];
 }
 
 void __init early_ioremap_init(void)
 {
-       unsigned long *pgd;
+       pmd_t *pmd;
 
        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");
 
-       pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
-       *pgd = __pa(bm_pte) | _PAGE_TABLE;
+       pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
+       pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
        /*
-        * The boot-ioremap range spans multiple pgds, for which
+        * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
-       if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
+       if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
-               printk(KERN_WARNING "pgd %p != %p\n",
-                      pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
+               printk(KERN_WARNING "pmd %p != %p\n",
+                      pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
-                      fix_to_virt(FIX_BTMAP_BEGIN));
+                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
-                      fix_to_virt(FIX_BTMAP_END));
+                       fix_to_virt(FIX_BTMAP_END));
 
                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
@@ -321,28 +319,29 @@ void __init early_ioremap_init(void)
 
 void __init early_ioremap_clear(void)
 {
-       unsigned long *pgd;
+       pmd_t *pmd;
 
        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");
 
-       pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
-       *pgd = 0;
-       paravirt_release_pt(__pa(pgd) >> PAGE_SHIFT);
+       pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+       pmd_clear(pmd);
+       paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
 }
 
 void __init early_ioremap_reset(void)
 {
        enum fixed_addresses idx;
-       unsigned long *pte, phys, addr;
+       unsigned long addr, phys;
+       pte_t *pte;
 
        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
-               if (*pte & _PAGE_PRESENT) {
-                       phys = *pte & PAGE_MASK;
+               if (pte_present(*pte)) {
+                       phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
@@ -351,7 +350,8 @@ void __init early_ioremap_reset(void)
 static void __init __early_set_fixmap(enum fixed_addresses idx,
                                   unsigned long phys, pgprot_t flags)
 {
-       unsigned long *pte, addr = __fix_to_virt(idx);
+       unsigned long addr = __fix_to_virt(idx);
+       pte_t *pte;
 
        if (idx >= __end_of_fixed_addresses) {
                BUG();
@@ -359,9 +359,9 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
-               *pte = (phys & PAGE_MASK) | pgprot_val(flags);
+               set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
-               *pte = 0;
+               pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
 }
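
Finally, a hedged sketch of how early boot code uses the fixmap-backed mappings set up above, before the vmalloc-based ioremap() is available; the physical range scanned here is hypothetical.

/*
 * Hypothetical early-boot user of the fixmap-backed path above.
 * early_ioremap()/early_iounmap() live in this same file; the physical
 * range below is made up for illustration.
 */
static void __init example_early_scan(void)
{
	void *p;

	p = early_ioremap(0xf0000, 0x10000);	/* hypothetical BIOS range */
	if (!p)
		return;

	/* ... inspect the temporarily mapped bytes ... */

	early_iounmap(p, 0x10000);
}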