arm64: mm: Create gigabyte kernel logical mappings where possible
author Steve Capper <steve.capper@linaro.org>
Tue, 6 May 2014 13:02:27 +0000 (14:02 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Fri, 9 May 2014 15:10:58 +0000 (16:10 +0100)
We have the capability to map 1GB level 1 blocks when using a 4K
granule.

This patch adjusts the create_mapping logic such that, when mapping
physical memory on boot, we attempt to use a 1GB block if the VA and PA
start and end addresses are all 1GB aligned. This reduces both the
number of lookup levels required to resolve a kernel logical address
and the TLB pressure on cores that support 1GB TLB entries.

Signed-off-by: Steve Capper <steve.capper@linaro.org>
Tested-by: Jungseok Lee <jays.lee@samsung.com>
[catalin.marinas@arm.com: s/prot_sect_kernel/PROT_SECT_NORMAL_EXEC/]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
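
As a rough illustration (not part of the patch), the 1GB block test used
in mm/mmu.c below can be sketched as a standalone userspace program. It
assumes a 4K granule, where a level 1 (PUD) entry covers 2^30 bytes; the
EX_-prefixed macros and the example addresses are made up for
illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* With a 4K granule a level 1 (PUD) entry covers 2^30 bytes, i.e. 1GB. */
#define EX_PUD_SHIFT    30
#define EX_PUD_SIZE     (1ULL << EX_PUD_SHIFT)
#define EX_PUD_MASK     (~(EX_PUD_SIZE - 1))

/*
 * Stand-in for the check in alloc_init_pud() below: a 1GB block can only
 * be used when the virtual start, the virtual end and the physical start
 * of the region are all 1GB aligned.
 */
static bool can_use_1gb_block(uint64_t addr, uint64_t next, uint64_t phys)
{
        return ((addr | next | phys) & ~EX_PUD_MASK) == 0;
}

int main(void)
{
        /* Fully 1GB-aligned VA/PA range: a single block entry suffices. */
        printf("%d\n", can_use_1gb_block(0xffffffc000000000ULL,
                                         0xffffffc040000000ULL,
                                         0x80000000ULL));       /* prints 1 */

        /* Only 2MB-aligned: fall back to 2MB mappings at the PMD level. */
        printf("%d\n", can_use_1gb_block(0xffffffc000200000ULL,
                                         0xffffffc040000000ULL,
                                         0x80200000ULL));       /* prints 0 */
        return 0;
}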
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable.h
arch/arm64/mm/mmu.c

diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 5fc8a66c39248a742168250e2ca94bc5ebfa3a9a..955e8c5f0afbb1a198541e53f81676baf9f87980 100644
@@ -29,6 +29,8 @@
  */
 
 #define PUD_TABLE_BIT          (_AT(pgdval_t, 1) << 1)
+#define PUD_TYPE_MASK          (_AT(pgdval_t, 3) << 0)
+#define PUD_TYPE_SECT          (_AT(pgdval_t, 1) << 0)
 
 /*
  * Level 2 descriptor (PMD).
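
The two new definitions expose the type field of a level 1 descriptor: in
the ARMv8 translation table format, bits[1:0] distinguish a table pointer
(0b11) from a block, i.e. section, entry (0b01). A standalone sketch of
that classification follows (not part of the patch; the EX_-prefixed
macros and the raw descriptor values are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define EX_PUD_TYPE_MASK        3ULL    /* bits[1:0] of the descriptor */
#define EX_PUD_TYPE_SECT        1ULL    /* valid bit set, table bit clear */
#define EX_PUD_TYPE_TABLE       3ULL    /* valid bit and table bit set */

static const char *pud_kind(uint64_t pud)
{
        switch (pud & EX_PUD_TYPE_MASK) {
        case EX_PUD_TYPE_SECT:
                return "1GB block (section)";
        case EX_PUD_TYPE_TABLE:
                return "pointer to a PMD table";
        default:
                return "invalid";
        }
}

int main(void)
{
        /* Made-up values: output address bits plus attribute bits. */
        printf("%s\n", pud_kind(0x0000000080000711ULL)); /* block */
        printf("%s\n", pud_kind(0x000000007fff0003ULL)); /* table */
        printf("%s\n", pud_kind(0x0000000000000000ULL)); /* invalid */
        return 0;
}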
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 752348dbb4f3d6ef06c9620e35744459b7d5987a..3de4ef8bfd82dc943bdf414512c8494da0f4ff65 100644
@@ -259,6 +259,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 #define mk_pmd(page,prot)      pfn_pmd(page_to_pfn(page),prot)
 
 #define pmd_page(pmd)           pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+#define pud_pfn(pud)           (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
 
 #define set_pmd_at(mm, addr, pmdp, pmd)        set_pmd(pmdp, pmd)
 
@@ -292,6 +293,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define pmd_sect(pmd)          ((pmd_val(pmd) & PMD_TYPE_MASK) == \
                                 PMD_TYPE_SECT)
 
+#ifdef CONFIG_ARM64_64K_PAGES
+#define pud_sect(pud)          (0)
+#else
+#define pud_sect(pud)          ((pud_val(pud) & PUD_TYPE_MASK) == \
+                                PUD_TYPE_SECT)
+#endif
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 2c0e1dda816370d4d6e30c560fd5cef6d341383f..3a729de96f1506239d239423bd9f47d0be20b5bc 100644
@@ -195,7 +195,30 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 
        do {
                next = pud_addr_end(addr, end);
-               alloc_init_pmd(pud, addr, next, phys);
+
+               /*
+                * For 4K granule only, attempt to put down a 1GB block
+                */
+               if ((PAGE_SHIFT == 12) &&
+                   ((addr | next | phys) & ~PUD_MASK) == 0) {
+                       pud_t old_pud = *pud;
+                       set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+
+                       /*
+                        * If we have an old value for a pud, it will
+                        * be pointing to a pmd table that we no longer
+                        * need (from swapper_pg_dir).
+                        *
+                        * Look up the old pmd table and free it.
+                        */
+                       if (!pud_none(old_pud)) {
+                               phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+                               memblock_free(table, PAGE_SIZE);
+                               flush_tlb_all();
+                       }
+               } else {
+                       alloc_init_pmd(pud, addr, next, phys);
+               }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
 }
@@ -338,6 +361,9 @@ int kern_addr_valid(unsigned long addr)
        if (pud_none(*pud))
                return 0;
 
+       if (pud_sect(*pud))
+               return pfn_valid(pud_pfn(*pud));
+
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;
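
For reference, pud_pfn() masks the attribute bits out of a section
descriptor with PUD_MASK and PHYS_MASK and shifts the remaining
1GB-aligned output address down by PAGE_SHIFT, so kern_addr_valid() can
validate an address covered by a 1GB block without descending to the PMD
level. A standalone sketch of that arithmetic (not part of the patch; it
assumes a 4K granule with PAGE_SHIFT 12 and PUD_SHIFT 30 plus a 48-bit
physical mask, and the descriptor value is made up):

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT   12
#define EX_PUD_SHIFT    30
#define EX_PUD_SIZE     (1ULL << EX_PUD_SHIFT)
#define EX_PUD_MASK     (~(EX_PUD_SIZE - 1))
#define EX_PHYS_MASK    ((1ULL << 48) - 1)      /* assumed 48-bit PA space */

/* Mirrors the pud_pfn() added above: descriptor -> first pfn of the block. */
static uint64_t ex_pud_pfn(uint64_t pud)
{
        return ((pud & EX_PUD_MASK) & EX_PHYS_MASK) >> EX_PAGE_SHIFT;
}

int main(void)
{
        /* Made-up 1GB block descriptor: output address 0x80000000 + attrs. */
        uint64_t pud = 0x0000000080000711ULL;

        printf("pfn = 0x%llx\n",
               (unsigned long long)ex_pud_pfn(pud));    /* prints 0x80000 */
        return 0;
}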