Merge tag 'xtensa-20161005' of git://github.com/jcmvbkbc/linux-xtensa
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 143251ede89784792258469e610e4eb6b0dc6546..88a044af7504dd9c47d0f97d64620ac8de4b5739 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -7,6 +7,7 @@
  *
  * Copyright (C) 1995  Linus Torvalds
  * Copyright (C) 2001 - 2005  Tensilica Inc.
+ * Copyright (C) 2014 - 2016  Cadence Design Systems Inc.
  *
  * Chris Zankel        <chris@zankel.net>
  * Joe Taylor  <joe@tensilica.com, joetylr@yahoo.com>
@@ -22,7 +23,6 @@
 #include <linux/bootmem.h>
 #include <linux/kernel.h>
 #include <linux/percpu.h>
-#include <linux/clk-provider.h>
 #include <linux/cpu.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
@@ -114,7 +114,7 @@ static int __init parse_tag_mem(const bp_tag_t *tag)
        if (mi->type != MEMORY_TYPE_CONVENTIONAL)
                return -1;
 
-       return add_sysmem_bank(mi->start, mi->end);
+       return memblock_add(mi->start, mi->end - mi->start);
 }
 
 __tagtable(BP_TAG_MEMORY, parse_tag_mem);
@@ -188,7 +188,6 @@ static int __init parse_bootparam(const bp_tag_t* tag)
 }
 
 #ifdef CONFIG_OF
-bool __initdata dt_memory_scan = false;
 
 #if !XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY
 unsigned long xtensa_kio_paddr = XCHAL_KIO_DEFAULT_PADDR;
@@ -228,11 +227,8 @@ static int __init xtensa_dt_io_area(unsigned long node, const char *uname,
 
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-       if (!dt_memory_scan)
-               return;
-
        size &= PAGE_MASK;
-       add_sysmem_bank(base, base + size);
+       memblock_add(base, size);
 }
 
 void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
@@ -242,9 +238,6 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
 
 void __init early_init_devtree(void *params)
 {
-       if (sysmem.nr_banks == 0)
-               dt_memory_scan = true;
-
        early_init_dt_scan(params);
        of_scan_flat_dt(xtensa_dt_io_area, NULL);
 
@@ -252,14 +245,6 @@ void __init early_init_devtree(void *params)
                strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 }
 
-static int __init xtensa_device_probe(void)
-{
-       of_clk_init(NULL);
-       return 0;
-}
-
-device_initcall(xtensa_device_probe);
-
 #endif /* CONFIG_OF */
 
 /*
@@ -277,12 +262,6 @@ void __init init_arch(bp_tag_t *bp_start)
        early_init_devtree(dtb_start);
 #endif
 
-       if (sysmem.nr_banks == 0) {
-               add_sysmem_bank(PLATFORM_DEFAULT_MEM_START,
-                               PLATFORM_DEFAULT_MEM_START +
-                               PLATFORM_DEFAULT_MEM_SIZE);
-       }
-
 #ifdef CONFIG_CMDLINE_BOOL
        if (!command_line[0])
                strlcpy(command_line, default_command_line, COMMAND_LINE_SIZE);
@@ -452,6 +431,10 @@ static int __init check_s32c1i(void)
 early_initcall(check_s32c1i);
 #endif /* CONFIG_S32C1I_SELFTEST */
 
+static inline int mem_reserve(unsigned long start, unsigned long end)
+{
+       return memblock_reserve(start, end - start);
+}
 
 void __init setup_arch(char **cmdline_p)
 {
@@ -463,54 +446,54 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start < initrd_end) {
                initrd_is_mapped = mem_reserve(__pa(initrd_start),
-                                              __pa(initrd_end), 0) == 0;
+                                              __pa(initrd_end)) == 0;
                initrd_below_start_ok = 1;
        } else {
                initrd_start = 0;
        }
 #endif
 
-       mem_reserve(__pa(&_stext),__pa(&_end), 1);
+       mem_reserve(__pa(&_stext), __pa(&_end));
 
        mem_reserve(__pa(&_WindowVectors_text_start),
-                   __pa(&_WindowVectors_text_end), 0);
+                   __pa(&_WindowVectors_text_end));
 
        mem_reserve(__pa(&_DebugInterruptVector_literal_start),
-                   __pa(&_DebugInterruptVector_text_end), 0);
+                   __pa(&_DebugInterruptVector_text_end));
 
        mem_reserve(__pa(&_KernelExceptionVector_literal_start),
-                   __pa(&_KernelExceptionVector_text_end), 0);
+                   __pa(&_KernelExceptionVector_text_end));
 
        mem_reserve(__pa(&_UserExceptionVector_literal_start),
-                   __pa(&_UserExceptionVector_text_end), 0);
+                   __pa(&_UserExceptionVector_text_end));
 
        mem_reserve(__pa(&_DoubleExceptionVector_literal_start),
-                   __pa(&_DoubleExceptionVector_text_end), 0);
+                   __pa(&_DoubleExceptionVector_text_end));
 
 #if XCHAL_EXCM_LEVEL >= 2
        mem_reserve(__pa(&_Level2InterruptVector_text_start),
-                   __pa(&_Level2InterruptVector_text_end), 0);
+                   __pa(&_Level2InterruptVector_text_end));
 #endif
 #if XCHAL_EXCM_LEVEL >= 3
        mem_reserve(__pa(&_Level3InterruptVector_text_start),
-                   __pa(&_Level3InterruptVector_text_end), 0);
+                   __pa(&_Level3InterruptVector_text_end));
 #endif
 #if XCHAL_EXCM_LEVEL >= 4
        mem_reserve(__pa(&_Level4InterruptVector_text_start),
-                   __pa(&_Level4InterruptVector_text_end), 0);
+                   __pa(&_Level4InterruptVector_text_end));
 #endif
 #if XCHAL_EXCM_LEVEL >= 5
        mem_reserve(__pa(&_Level5InterruptVector_text_start),
-                   __pa(&_Level5InterruptVector_text_end), 0);
+                   __pa(&_Level5InterruptVector_text_end));
 #endif
 #if XCHAL_EXCM_LEVEL >= 6
        mem_reserve(__pa(&_Level6InterruptVector_text_start),
-                   __pa(&_Level6InterruptVector_text_end), 0);
+                   __pa(&_Level6InterruptVector_text_end));
 #endif
 
 #ifdef CONFIG_SMP
        mem_reserve(__pa(&_SecondaryResetVector_text_start),
-                   __pa(&_SecondaryResetVector_text_end), 0);
+                   __pa(&_SecondaryResetVector_text_end));
 #endif
        parse_early_param();
        bootmem_init();
@@ -555,6 +538,137 @@ static int __init topology_init(void)
 }
 subsys_initcall(topology_init);
 
+void cpu_reset(void)
+{
+#if XCHAL_HAVE_PTP_MMU
+       local_irq_disable();
+       /*
+        * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
+        * be flushed.
+        * Way 4 is not currently used by linux.
+        * Ways 5 and 6 shall not be touched on MMUv2 as they are hardwired.
+        * Way 5 shall be flushed and way 6 shall be set to identity mapping
+        * on MMUv3.
+        */
+       local_flush_tlb_all();
+       invalidate_page_directory();
+#if XCHAL_HAVE_SPANNING_WAY
+       /* MMU v3 */
+       {
+               unsigned long vaddr = (unsigned long)cpu_reset;
+               unsigned long paddr = __pa(vaddr);
+               unsigned long tmpaddr = vaddr + SZ_512M;
+               unsigned long tmp0, tmp1, tmp2, tmp3;
+
+               /*
+                * Find a place for the temporary mapping. It must not be
+                * in the same 512MB region with vaddr or paddr, otherwise
+                * there may be multihit exception either on entry to the
+                * temporary mapping, or on entry to the identity mapping.
+                * (512MB is the biggest page size supported by TLB.)
+                */
+               while (((tmpaddr ^ paddr) & -SZ_512M) == 0)
+                       tmpaddr += SZ_512M;
+
+               /* Invalidate mapping in the selected temporary area */
+               if (itlb_probe(tmpaddr) & 0x8)
+                       invalidate_itlb_entry(itlb_probe(tmpaddr));
+               if (itlb_probe(tmpaddr + PAGE_SIZE) & 0x8)
+                       invalidate_itlb_entry(itlb_probe(tmpaddr + PAGE_SIZE));
+
+               /*
+                * Map two consecutive pages starting at the physical address
+                * of this function to the temporary mapping area.
+                */
+               write_itlb_entry(__pte((paddr & PAGE_MASK) |
+                                      _PAGE_HW_VALID |
+                                      _PAGE_HW_EXEC |
+                                      _PAGE_CA_BYPASS),
+                                tmpaddr & PAGE_MASK);
+               write_itlb_entry(__pte(((paddr & PAGE_MASK) + PAGE_SIZE) |
+                                      _PAGE_HW_VALID |
+                                      _PAGE_HW_EXEC |
+                                      _PAGE_CA_BYPASS),
+                                (tmpaddr & PAGE_MASK) + PAGE_SIZE);
+
+               /* Reinitialize TLB */
+               __asm__ __volatile__ ("movi     %0, 1f\n\t"
+                                     "movi     %3, 2f\n\t"
+                                     "add      %0, %0, %4\n\t"
+                                     "add      %3, %3, %5\n\t"
+                                     "jx       %0\n"
+                                     /*
+                                      * No literal, data or stack access
+                                      * below this point
+                                      */
+                                     "1:\n\t"
+                                     /* Initialize *tlbcfg */
+                                     "movi     %0, 0\n\t"
+                                     "wsr      %0, itlbcfg\n\t"
+                                     "wsr      %0, dtlbcfg\n\t"
+                                     /* Invalidate TLB way 5 */
+                                     "movi     %0, 4\n\t"
+                                     "movi     %1, 5\n"
+                                     "1:\n\t"
+                                     "iitlb    %1\n\t"
+                                     "idtlb    %1\n\t"
+                                     "add      %1, %1, %6\n\t"
+                                     "addi     %0, %0, -1\n\t"
+                                     "bnez     %0, 1b\n\t"
+                                     /* Initialize TLB way 6 */
+                                     "movi     %0, 7\n\t"
+                                     "addi     %1, %9, 3\n\t"
+                                     "addi     %2, %9, 6\n"
+                                     "1:\n\t"
+                                     "witlb    %1, %2\n\t"
+                                     "wdtlb    %1, %2\n\t"
+                                     "add      %1, %1, %7\n\t"
+                                     "add      %2, %2, %7\n\t"
+                                     "addi     %0, %0, -1\n\t"
+                                     "bnez     %0, 1b\n\t"
+                                     /* Jump to identity mapping */
+                                     "jx       %3\n"
+                                     "2:\n\t"
+                                     /* Complete way 6 initialization */
+                                     "witlb    %1, %2\n\t"
+                                     "wdtlb    %1, %2\n\t"
+                                     /* Invalidate temporary mapping */
+                                     "sub      %0, %9, %7\n\t"
+                                     "iitlb    %0\n\t"
+                                     "add      %0, %0, %8\n\t"
+                                     "iitlb    %0"
+                                     : "=&a"(tmp0), "=&a"(tmp1), "=&a"(tmp2),
+                                       "=&a"(tmp3)
+                                     : "a"(tmpaddr - vaddr),
+                                       "a"(paddr - vaddr),
+                                       "a"(SZ_128M), "a"(SZ_512M),
+                                       "a"(PAGE_SIZE),
+                                       "a"((tmpaddr + SZ_512M) & PAGE_MASK)
+                                     : "memory");
+       }
+#endif
+#endif
+       __asm__ __volatile__ ("movi     a2, 0\n\t"
+                             "wsr      a2, icountlevel\n\t"
+                             "movi     a2, 0\n\t"
+                             "wsr      a2, icount\n\t"
+#if XCHAL_NUM_IBREAK > 0
+                             "wsr      a2, ibreakenable\n\t"
+#endif
+#if XCHAL_HAVE_LOOPS
+                             "wsr      a2, lcount\n\t"
+#endif
+                             "movi     a2, 0x1f\n\t"
+                             "wsr      a2, ps\n\t"
+                             "isync\n\t"
+                             "jx       %0\n\t"
+                             :
+                             : "a" (XCHAL_RESET_VECTOR_VADDR)
+                             : "a2");
+       for (;;)
+               ;
+}
+
 void machine_restart(char * cmd)
 {
        platform_restart();