Merge tag '9p-for-4.20' of git://github.com/martinetd/linux

diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 120a49bfb9c645b83d2deb6a635a922f9af9f62a..5877f5aa8f5d0a0d00da87e749452236c1a4e35c 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -76,56 +76,69 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 void __iomem *
 ioremap(phys_addr_t addr, unsigned long size)
 {
-       return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED,
-                               __builtin_return_address(0));
+       pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
+
+       return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap);
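
[note] ioremap() now derives its protection from PAGE_KERNEL via pgprot_noncached() instead of open-coding _PAGE_NO_CACHE | _PAGE_GUARDED. As a hedged reference, the helper is defined along these lines in the powerpc pgtable headers (illustrative sketch; the exact macro can differ between the book3s and nohash variants):

	/*
	 * Illustrative sketch: clear the cache-control bits, then mark
	 * the mapping cache-inhibited (I) and guarded (G), matching the
	 * open-coded flags removed above.
	 */
	#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | \
				 _PAGE_NO_CACHE | _PAGE_WRITETHRU)

	#define pgprot_noncached(prot) \
		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
			  _PAGE_NO_CACHE | _PAGE_GUARDED))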
 
 void __iomem *
 ioremap_wc(phys_addr_t addr, unsigned long size)
 {
-       return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
-                               __builtin_return_address(0));
+       pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
+
+       return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_wc);
 
+void __iomem *
+ioremap_wt(phys_addr_t addr, unsigned long size)
+{
+       pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);
+
+       return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wt);
+
+void __iomem *
+ioremap_coherent(phys_addr_t addr, unsigned long size)
+{
+       pgprot_t prot = pgprot_cached(PAGE_KERNEL);
+
+       return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_coherent);
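
[note] The other three variants select their cache attributes through the same helper family. Hedged, illustrative sketches modelled on the same headers, not verbatim from this patch:

	/* write-combining: cache-inhibited, but not guarded */
	#define pgprot_noncached_wc(prot) \
		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | _PAGE_NO_CACHE))

	/* write-through: cached and coherent, writes pushed through */
	#define pgprot_cached_wthru(prot) \
		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
			  _PAGE_COHERENT | _PAGE_WRITETHRU))

	/* fully cached, coherent mapping */
	#define pgprot_cached(prot) \
		(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | _PAGE_COHERENT))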
+
 void __iomem *
 ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
+       pte_t pte = __pte(flags);
+
        /* writeable implies dirty for kernel addresses */
-       if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO)
-               flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
+       if (pte_write(pte))
+               pte = pte_mkdirty(pte);
 
        /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
-       flags &= ~(_PAGE_USER | _PAGE_EXEC);
-       flags |= _PAGE_PRIVILEGED;
+       pte = pte_exprotect(pte);
+       pte = pte_mkprivileged(pte);
 
-       return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+       return __ioremap_caller(addr, size, pte_pgprot(pte), __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_prot);
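
[note] ioremap_prot() now round-trips the raw flags through a pte_t so it can use the generic pte accessors instead of flag arithmetic. On platforms that define the relevant bits, those accessors reduce to something like the following (illustrative only; the real helpers are defined per platform, and _PAGE_PRIVILEGED does not exist everywhere):

	/* illustrative: strip execute permission from a pte */
	static inline pte_t pte_exprotect(pte_t pte)
	{
		return __pte(pte_val(pte) & ~_PAGE_EXEC);
	}

	/* illustrative: make a pte kernel-only */
	static inline pte_t pte_mkprivileged(pte_t pte)
	{
		return __pte((pte_val(pte) & ~_PAGE_USER) | _PAGE_PRIVILEGED);
	}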
 
 void __iomem *
 __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
 {
-       return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+       return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
 }
 
 void __iomem *
-__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
-                void *caller)
+__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
 {
        unsigned long v, i;
        phys_addr_t p;
        int err;
 
-       /* Make sure we have the base flags */
-       if ((flags & _PAGE_PRESENT) == 0)
-               flags |= pgprot_val(PAGE_KERNEL);
-
-       /* Non-cacheable page cannot be coherent */
-       if (flags & _PAGE_NO_CACHE)
-               flags &= ~_PAGE_COHERENT;
-
        /*
         * Choose an address to map it to.
         * Once the vmalloc system is running, we use it.
@@ -183,7 +196,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 
        err = 0;
        for (i = 0; i < size && err == 0; i += PAGE_SIZE)
-               err = map_kernel_page(v+i, p+i, flags);
+               err = map_kernel_page(v + i, p + i, prot);
        if (err) {
                if (slab_is_available())
                        vunmap((void *)v);
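
[note] With every caller now handing over a fully-formed pgprot_t, the two fixups deleted at the top of this function (falling back to PAGE_KERNEL when _PAGE_PRESENT was missing, and stripping _PAGE_COHERENT from non-cacheable mappings) have no work left to do; the helpers already produce complete, consistent combinations. For instance, the converted ioremap() above arrives here as (illustrative):

	/* complete protection value, no fixup needed */
	__ioremap_caller(addr, size, pgprot_noncached(PAGE_KERNEL),
			 __builtin_return_address(0));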
@@ -209,7 +222,7 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
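
[note] For context, a hedged usage sketch of the ioremap()/iounmap() pair from a driver's point of view; DEMO_REG_BASE, DEMO_REG_SIZE and the 0x10 offset are made-up placeholders, not symbols from this patch:

	#include <linux/errno.h>
	#include <linux/io.h>

	static int demo_map_regs(void)
	{
		void __iomem *regs;

		/* uncached, guarded mapping of device registers */
		regs = ioremap(DEMO_REG_BASE, DEMO_REG_SIZE);
		if (!regs)
			return -ENOMEM;

		writel(0x1, regs + 0x10);	/* MMIO access via the mapping */

		iounmap(regs);
		return 0;
	}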
 
-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
 {
        pmd_t *pd;
        pte_t *pg;
@@ -224,10 +237,8 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
                /* The PTE should never be already set nor present in the
                 * hash table
                 */
-               BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
-                      flags);
-               set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
-                                                    __pgprot(flags)));
+               BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
+               set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
        }
        smp_wmb();
        return err;
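
[note] The BUG_ON() above relies on the new pte_hashpte() accessor rather than masking _PAGE_HASHPTE by hand. An illustrative form (hedged; the real accessor lives in the platform headers, and _PAGE_HASHPTE only exists on hash-MMU platforms):

	static inline bool pte_hashpte(pte_t pte)
	{
		return !!(pte_val(pte) & _PAGE_HASHPTE);
	}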
@@ -238,7 +249,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
  */
 static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
 {
-       unsigned long v, s, f;
+       unsigned long v, s;
        phys_addr_t p;
        int ktext;
 
@@ -248,11 +259,10 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
        for (; s < top; s += PAGE_SIZE) {
                ktext = ((char *)v >= _stext && (char *)v < etext) ||
                        ((char *)v >= _sinittext && (char *)v < _einittext);
-               f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
-               map_kernel_page(v, p, f);
+               map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
 #ifdef CONFIG_PPC_STD_MMU_32
                if (ktext)
-                       hash_preload(&init_mm, v, 0, 0x300);
+                       hash_preload(&init_mm, v, false, 0x300);
 #endif
                v += PAGE_SIZE;
                p += PAGE_SIZE;
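
[note] The hash_preload() call above passes false instead of 0 because, in the same series, the third parameter is assumed to have become a bool "is_exec" flag rather than a raw access mask. A hedged sketch of the updated prototype:

	/* assumed prototype after the series (hedged) */
	void hash_preload(struct mm_struct *mm, unsigned long ea,
			  bool is_exec, unsigned long trap);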