s390/mm: fix pmd_huge() usage for kernel mapping
author Heiko Carstens <heiko.carstens@de.ibm.com>
Mon, 1 Oct 2012 10:58:34 +0000 (12:58 +0200)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Tue, 9 Oct 2012 12:16:56 +0000 (14:16 +0200)
pmd_huge() will always return 0 on !HUGETLBFS; however, we use that helper
function when walking the kernel page tables to decide whether we have a
1MB page frame or not.
Since we create 1MB frames for the kernel 1:1 mapping independently of
HUGETLBFS, this can lead to incorrect storage accesses, because the code
may assume it has a pointer to a page table when the entry actually maps
a 1MB frame.
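
A minimal sketch of the failure mode, assuming the usual generic fallback
that defines pmd_huge() to 0 when hugetlbfs is not configured (the walker
below is only illustrative, it is not code from this patch):

    /* generic fallback without CONFIG_HUGETLB_PAGE (assumption) */
    #define pmd_huge(x)	0

    static void walk_kernel_mapping(unsigned long addr)
    {
            pmd_t *pmd;

            pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);
            if (pmd_huge(*pmd))     /* always 0 here ...                 */
                    return;         /* ... a 1MB frame is never detected */
            /* wrong: treats the 1MB frame as if it were a page table */
            pte_offset_kernel(pmd, addr);
    }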

Fix this by adding a pmd_large() primitive, as other architectures
already have, and remove all references to HUGETLBFS/HUGETLBPAGE from
the code that walks kernel page tables.
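
With pmd_large() the walkers can detect a 1MB segment entry no matter how
hugetlbfs is configured; roughly the pattern used below in vmem.c
(simplified sketch, not the literal code):

    if (pmd_large(*pm_dir)) {
            /* segment entry maps a 1MB frame directly */
            pmd_clear(pm_dir);
            address += PMD_SIZE;
    } else {
            /* segment entry points to a page table of 4K ptes */
            pt_dir = pte_offset_kernel(pm_dir, address);
            *pt_dir = pte;
            address += PAGE_SIZE;
    }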

Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/include/asm/pgtable.h
arch/s390/mm/pageattr.c
arch/s390/mm/vmem.c

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 979fe3dc07889b0cd50df90508ace5513c2392f3..75b91bb772bd3e5087818bf73a04e177cbea834a 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -507,6 +507,15 @@ static inline int pmd_none(pmd_t pmd)
        return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
 }
 
+static inline int pmd_large(pmd_t pmd)
+{
+#ifdef CONFIG_64BIT
+       return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+#else
+       return 0;
+#endif
+}
+
 static inline int pmd_bad(pmd_t pmd)
 {
        unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index b36537a5f43e33e3a1a6cc9907efcf3920e8d583..0f33bab3e98483d6ac5df2f430cfa868d82517b7 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -21,7 +21,7 @@ static void change_page_attr(unsigned long addr, int numpages,
                pgdp = pgd_offset(&init_mm, addr);
                pudp = pud_offset(pgdp, addr);
                pmdp = pmd_offset(pudp, addr);
-               if (pmd_huge(*pmdp)) {
+               if (pmd_large(*pmdp)) {
                        WARN_ON_ONCE(1);
                        continue;
                }
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index c22abf900c9e8a1e5b1db3bb36b9f77530dd0901..5b70393911bdf0bdb9809faf5433e318454bfb0a 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -79,7 +79,8 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
  */
 static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
-       unsigned long address;
+       unsigned long end = start + size;
+       unsigned long address = start;
        pgd_t *pg_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
@@ -87,7 +88,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
        pte_t  pte;
        int ret = -ENOMEM;
 
-       for (address = start; address < start + size; address += PAGE_SIZE) {
+       while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        pu_dir = vmem_pud_alloc();
@@ -108,12 +109,11 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
                pm_dir = pmd_offset(pu_dir, address);
 
 #if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
-               if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
-                   (address + HPAGE_SIZE <= start + size) &&
-                   (address >= HPAGE_SIZE)) {
+               if (MACHINE_HAS_EDAT1 && address && !(address & ~PMD_MASK) &&
+                   (address + PMD_SIZE <= end)) {
                        pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
                        pmd_val(*pm_dir) = pte_val(pte);
-                       address += HPAGE_SIZE - PAGE_SIZE;
+                       address += PMD_SIZE;
                        continue;
                }
 #endif
@@ -126,10 +126,11 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 
                pt_dir = pte_offset_kernel(pm_dir, address);
                *pt_dir = pte;
+               address += PAGE_SIZE;
        }
        ret = 0;
 out:
-       flush_tlb_kernel_range(start, start + size);
+       flush_tlb_kernel_range(start, end);
        return ret;
 }
 
@@ -139,7 +140,8 @@ out:
  */
 static void vmem_remove_range(unsigned long start, unsigned long size)
 {
-       unsigned long address;
+       unsigned long end = start + size;
+       unsigned long address = start;
        pgd_t *pg_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
@@ -147,7 +149,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
        pte_t  pte;
 
        pte_val(pte) = _PAGE_TYPE_EMPTY;
-       for (address = start; address < start + size; address += PAGE_SIZE) {
+       while (address < end) {
                pg_dir = pgd_offset_k(address);
                pu_dir = pud_offset(pg_dir, address);
                if (pud_none(*pu_dir))
@@ -155,17 +157,16 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir))
                        continue;
-
-               if (pmd_huge(*pm_dir)) {
+               if (pmd_large(*pm_dir)) {
                        pmd_clear(pm_dir);
-                       address += HPAGE_SIZE - PAGE_SIZE;
+                       address += PMD_SIZE;
                        continue;
                }
-
                pt_dir = pte_offset_kernel(pm_dir, address);
                *pt_dir = pte;
+               address += PAGE_SIZE;
        }
-       flush_tlb_kernel_range(start, start + size);
+       flush_tlb_kernel_range(start, end);
 }
 
 /*
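
The large-mapping test added to vmem_add_mem() above only takes the 1MB
path when the machine has EDAT1, the current address is 1MB aligned, and
a full 1MB chunk still fits before end. A small worked example of the
alignment check, assuming PMD_SIZE is 1MB (0x100000) on s390:

    /* !(address & ~PMD_MASK) is the 1MB alignment test */
    address = 0x00500000;   /* 1MB aligned                               */
    address & ~PMD_MASK;    /* == 0      -> large segment entry possible */

    address = 0x00501000;   /* 4K into a 1MB segment                     */
    address & ~PMD_MASK;    /* == 0x1000 -> fall back to 4K ptes         */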