1 /* SPDX-License-Identifier: GPL-2.0-or-later */
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
19 #include <linux/init.h>
20 #include <linux/pgtable.h>
24 #include <asm/cputable.h>
25 #include <asm/cache.h>
26 #include <asm/thread_info.h>
27 #include <asm/ppc_asm.h>
28 #include <asm/asm-offsets.h>
29 #include <asm/ptrace.h>
31 #include <asm/kvm_book3s_asm.h>
32 #include <asm/export.h>
33 #include <asm/feature-fixups.h>
37 #define LOAD_BAT(n, reg, RA, RB) \
38 /* see the comment for clear_bats() -- Cort */ \
40 mtspr SPRN_IBAT##n##U,RA; \
41 mtspr SPRN_DBAT##n##U,RA; \
42 lwz RA,(n*16)+0(reg); \
43 lwz RB,(n*16)+4(reg); \
44 mtspr SPRN_IBAT##n##U,RA; \
45 mtspr SPRN_IBAT##n##L,RB; \
46 lwz RA,(n*16)+8(reg); \
47 lwz RB,(n*16)+12(reg); \
48 mtspr SPRN_DBAT##n##U,RA; \
49 mtspr SPRN_DBAT##n##L,RB
52 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
53 .stabs "head_book3s_32.S",N_SO,0,0,0f
58 * _start is defined this way because the XCOFF loader in the OpenFirmware
59 * on the powermac expects the entry point to be a procedure descriptor.
63 * These are here for legacy reasons, the kernel used to
64 * need to look like a coff function entry for the pmac
65 * but we're always started by some kind of bootloader now.
68 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
69 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
73 * Enter here with the kernel text, data and bss loaded starting at
74 * 0, running with virtual == physical mapping.
75 * r5 points to the prom entry point (the client interface handler
76 * address). Address translation is turned on, with the prom
77 * managing the hash table. Interrupts are disabled. The stack
78 * pointer (r1) points to just below the end of the half-meg region
79 * from 0x380000 - 0x400000, which is mapped in already.
81 * If we are booted from MacOS via BootX, we enter with the kernel
82 * image loaded somewhere, and the following values in registers:
83 * r3: 'BooX' (0x426f6f58)
84 * r4: virtual address of boot_infos_t
88 * This is jumped to on prep systems right after the kernel is relocated
89 * to its proper place in memory by the boot loader. The expected layout
91 * r3: ptr to residual data
92 * r4: initrd_start or if no initrd then 0
93 * r5: initrd_end - unused if r4 is 0
94 * r6: Start of command line string
95 * r7: End of command line string
97 * This just gets a minimal mmu environment setup so we can call
98 * start_here() to do the real work.
105 * We have to do any OF calls before we map ourselves to KERNELBASE,
106 * because OF may have I/O devices mapped into that area
107 * (particularly on CHRP).
112 #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
113 /* find out where we are now */
115 0: mflr r8 /* r8 = runtime addr here */
116 addis r8,r8,(_stext - 0b)@ha
117 addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
119 #endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
121 /* We never return. We also hit that trap if trying to boot
122 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
126 * Check for BootX signature when supporting PowerMac and branch to
127 * appropriate trampoline if it's present
129 #ifdef CONFIG_PPC_PMAC
136 #endif /* CONFIG_PPC_PMAC */
138 1: mr r31,r3 /* save device tree ptr */
142 * early_init() does the early machine identification and does
143 * the necessary low-level setup and clears the BSS
144 * -- Cort <cort@fsmlabs.com>
148 /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
149 * the physical address we are running at, returned by early_init()
157 bl load_segment_registers
160 #if defined(CONFIG_BOOTX_TEXT)
163 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
166 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
167 bl setup_usbgecko_bat
171 * Call setup_cpu for CPU 0 and initialize 6xx Idle
175 bl call_setup_cpu /* Call setup_cpu for this CPU */
181 * We need to run with _start at physical address 0.
182 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
183 * the exception vectors at 0 (and therefore this copy
184 * overwrites OF's exception vectors with our own).
185 * The MMU is off at this point.
189 addis r4,r3,KERNELBASE@h /* current address of _start */
190 lis r5,PHYSICAL_START@h
191 cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
194 * we now have the 1st 16M of ram mapped with the bats.
195 * prep needs the mmu to be turned on here, but pmac already has it on.
196 * this shouldn't bother the pmac since it just gets turned on again
197 * as we jump to our code at KERNELBASE. -- Cort
198 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
199 * off, and in other cases, we now turn it off before changing BATs above.
203 ori r0,r0,MSR_DR|MSR_IR|MSR_RI
206 ori r0,r0,start_here@l
208 rfi /* enables MMU */
211 * We need __secondary_hold as a place to hold the other cpus on
212 * an SMP machine, even when we are running a UP kernel.
214 . = 0xc0 /* for prep bootloader */
215 li r3,1 /* MTX only has 1 cpu */
216 .globl __secondary_hold
218 /* tell the master we're here */
219 stw r3,__secondary_hold_acknowledge@l(0)
222 /* wait until we're told to start */
225 /* our cpu # was at addr 0 - go */
226 mr r24,r3 /* cpu # */
230 #endif /* CONFIG_SMP */
232 .globl __secondary_hold_spinloop
233 __secondary_hold_spinloop:
235 .globl __secondary_hold_acknowledge
236 __secondary_hold_acknowledge:
240 /* core99 pmac starts the secondary here by changing the vector, and
241    putting it back to what it was (unknown_async_exception) when done. */
242 EXCEPTION(0x100, Reset, unknown_async_exception, EXC_XFER_STD)
246 * On CHRP, this is complicated by the fact that we could get a
247 * machine check inside RTAS, and we have no guarantee that certain
248 * critical registers will have the values we expect. The set of
249 * registers that might have bad values includes all the GPRs
250 * and all the BATs. We indicate that we are in RTAS by putting
251 * a non-zero value, the address of the exception frame to use,
252 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
253 * and uses its value if it is non-zero.
254 * (Other exception handlers assume that r1 is a valid kernel stack
255 * pointer when we take an exception from supervisor mode.)
262 #ifdef CONFIG_PPC_CHRP
263 #ifdef CONFIG_VMAP_STACK
264 mtspr SPRN_SPRG_SCRATCH2,r1
265 mfspr r1, SPRN_SPRG_THREAD
269 mfspr r1, SPRN_SPRG_SCRATCH2
271 mfspr r11, SPRN_SPRG_THREAD
272 lwz r11, RTAS_SP(r11)
276 #endif /* CONFIG_PPC_CHRP */
277 EXCEPTION_PROLOG_1 for_rtas=1
278 7: EXCEPTION_PROLOG_2
279 addi r3,r1,STACK_FRAME_OVERHEAD
280 #ifdef CONFIG_PPC_CHRP
281 beq cr1, machine_check_tramp
284 b machine_check_tramp
287 /* Data access exception. */
291 #ifdef CONFIG_VMAP_STACK
292 #ifdef CONFIG_PPC_BOOK3S_604
293 BEGIN_MMU_FTR_SECTION
294 mtspr SPRN_SPRG_SCRATCH2,r10
295 mfspr r10, SPRN_SPRG_THREAD
297 mfspr r10, SPRN_DSISR
299 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
300 mfspr r10, SPRN_SPRG_THREAD
302 .Lhash_page_dsi_cont:
305 mfspr r10, SPRN_SPRG_SCRATCH2
308 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
310 1: EXCEPTION_PROLOG_0 handle_dar_dsisr=1
312 b handle_page_fault_tramp_1
313 #else /* CONFIG_VMAP_STACK */
314 EXCEPTION_PROLOG handle_dar_dsisr=1
315 get_and_save_dar_dsisr_on_stack r4, r5, r11
316 #ifdef CONFIG_PPC_BOOK3S_604
317 BEGIN_MMU_FTR_SECTION
318 andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
319 bne handle_page_fault_tramp_2 /* if not, try to put a PTE */
320 rlwinm r3, r5, 32 - 15, 21, 21 /* DSISR_STORE -> _PAGE_RW */
322 b handle_page_fault_tramp_1
325 b handle_page_fault_tramp_2
326 #ifdef CONFIG_PPC_BOOK3S_604
327 ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
329 #endif /* CONFIG_VMAP_STACK */
331 /* Instruction access exception. */
335 #ifdef CONFIG_VMAP_STACK
336 mtspr SPRN_SPRG_SCRATCH0,r10
337 mtspr SPRN_SPRG_SCRATCH1,r11
338 mfspr r10, SPRN_SPRG_THREAD
341 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
344 #ifdef CONFIG_PPC_BOOK3S_604
345 BEGIN_MMU_FTR_SECTION
346 andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
348 .Lhash_page_isi_cont:
349 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
350 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
352 andi. r11, r11, MSR_PR
356 #else /* CONFIG_VMAP_STACK */
358 andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */
359 beq 1f /* if so, try to put a PTE */
360 li r3,0 /* into the hash table */
361 mr r4,r12 /* SRR0 is fault address */
362 #ifdef CONFIG_PPC_BOOK3S_604
363 BEGIN_MMU_FTR_SECTION
365 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
367 #endif /* CONFIG_VMAP_STACK */
368 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
371 EXC_XFER_LITE(0x400, handle_page_fault)
373 /* External interrupt */
374 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
376 /* Alignment exception */
380 EXCEPTION_PROLOG handle_dar_dsisr=1
381 save_dar_dsisr_on_stack r4, r5, r11
382 addi r3,r1,STACK_FRAME_OVERHEAD
383 b alignment_exception_tramp
385 /* Program check exception */
386 EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
388 /* Floating-point unavailable */
392 #ifdef CONFIG_PPC_FPU
395 * Certain Freescale cores don't have a FPU and treat fp instructions
396 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
399 END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
402 bl load_up_fpu /* if from user, just load it up */
403 b fast_exception_return
404 1: addi r3,r1,STACK_FRAME_OVERHEAD
405 EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)
411 EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
413 EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
414 EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)
422 EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
423 EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)
426 * The Altivec unavailable trap is at 0x0f20. Foo.
427 * We effectively remap it to 0x3000.
428 * We include an altivec unavailable exception vector even if
429 * not configured for Altivec, so that you can't panic a
430 * non-altivec kernel running on a machine with altivec just
431 * by executing an altivec instruction.
442 * Handle TLB miss for instruction on 603/603e.
443 * Note: we get an alternate set of r0 - r3 to use automatically.
449 * r1: linux style pte ( later becomes ppc hardware pte )
450 * r2: ptr to linux-style pte
453 /* Get PTE (linux-style) and check access */
455 #ifdef CONFIG_MODULES
456 lis r1, TASK_SIZE@h /* check if kernel address */
460 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
461 rlwinm r2, r2, 28, 0xfffff000
462 #ifdef CONFIG_MODULES
464 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
465 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
466 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
468 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
469 lwz r2,0(r2) /* get pmd entry */
470 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
471 beq- InstructionAddressInvalid /* return if no mapping */
472 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
473 lwz r0,0(r2) /* get linux-style pte */
474 andc. r1,r1,r0 /* check access & ~permission */
475 bne- InstructionAddressInvalid /* return if access not permitted */
476 /* Convert linux-style PTE to low word of PPC-style PTE */
477 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
478 ori r1, r1, 0xe06 /* clear out reserved bits */
479 andc r1, r0, r1 /* PP = user? 1 : 0 */
481 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
482 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
485 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
488 InstructionAddressInvalid:
490 rlwinm r1,r3,9,6,6 /* Get load/store bit */
493 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
494 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
497 mfspr r1,SPRN_IMISS /* Get failing address */
498 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
499 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
501 mtspr SPRN_DAR,r1 /* Set fault address */
502 mfmsr r0 /* Restore "normal" registers */
503 xoris r0,r0,MSR_TGPR>>16
504 mtcrf 0x80,r3 /* Restore CR0 */
509 * Handle TLB miss for DATA Load operation on 603/603e
515 * r1: linux style pte ( later becomes ppc hardware pte )
516 * r2: ptr to linux-style pte
519 /* Get PTE (linux-style) and check access */
521 lis r1, TASK_SIZE@h /* check if kernel address */
524 li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
525 rlwinm r2, r2, 28, 0xfffff000
527 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
528 li r1, _PAGE_PRESENT | _PAGE_ACCESSED
529 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
530 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
531 lwz r2,0(r2) /* get pmd entry */
532 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
533 beq- DataAddressInvalid /* return if no mapping */
534 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
535 lwz r0,0(r2) /* get linux-style pte */
536 andc. r1,r1,r0 /* check access & ~permission */
537 bne- DataAddressInvalid /* return if access not permitted */
539 * NOTE! We are assuming this is not an SMP system, otherwise
540 * we would need to update the pte atomically with lwarx/stwcx.
542 /* Convert linux-style PTE to low word of PPC-style PTE */
543 rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
544 rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
545 rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
546 ori r1,r1,0xe04 /* clear out reserved bits */
547 andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
549 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
550 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
552 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
554 BEGIN_MMU_FTR_SECTION
556 mfspr r1,SPRN_SPRG_603_LRU
557 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
561 mtspr SPRN_SPRG_603_LRU,r1
563 rlwimi r2,r0,31-14,14,14
565 END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
570 rlwinm r1,r3,9,6,6 /* Get load/store bit */
573 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
575 mfspr r1,SPRN_DMISS /* Get failing address */
576 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
577 beq 20f /* Jump if big endian */
579 20: mtspr SPRN_DAR,r1 /* Set fault address */
580 mfmsr r0 /* Restore "normal" registers */
581 xoris r0,r0,MSR_TGPR>>16
582 mtcrf 0x80,r3 /* Restore CR0 */
587 * Handle TLB miss for DATA Store on 603/603e
593 * r1: linux style pte ( later becomes ppc hardware pte )
594 * r2: ptr to linux-style pte
597 /* Get PTE (linux-style) and check access */
599 lis r1, TASK_SIZE@h /* check if kernel address */
602 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
603 rlwinm r2, r2, 28, 0xfffff000
605 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
606 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
607 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
608 112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
609 lwz r2,0(r2) /* get pmd entry */
610 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
611 beq- DataAddressInvalid /* return if no mapping */
612 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
613 lwz r0,0(r2) /* get linux-style pte */
614 andc. r1,r1,r0 /* check access & ~permission */
615 bne- DataAddressInvalid /* return if access not permitted */
617 * NOTE! We are assuming this is not an SMP system, otherwise
618 * we would need to update the pte atomically with lwarx/stwcx.
620 /* Convert linux-style PTE to low word of PPC-style PTE */
621 rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
622 li r1,0xe06 /* clear out reserved bits & PP msb */
623 andc r1,r0,r1 /* PP = user? 1: 0 */
625 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
626 END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
628 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
630 BEGIN_MMU_FTR_SECTION
632 mfspr r1,SPRN_SPRG_603_LRU
633 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
637 mtspr SPRN_SPRG_603_LRU,r1
639 rlwimi r2,r0,31-14,14,14
641 END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
645 #ifndef CONFIG_ALTIVEC
646 #define altivec_assist_exception unknown_exception
649 #ifndef CONFIG_TAU_INT
650 #define TAUException unknown_async_exception
/*
 * Remaining exception vectors, 0x1300-0x2f00.  Each EXCEPTION() entry
 * places a vector stub that transfers to the named C handler.  Most of
 * these vectors are unused on 6xx-class CPUs and are routed to
 * unknown_exception; the named ones (breakpoint, SMI, thermal, run mode)
 * dispatch to their specific handlers.
 */
653 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
654 EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
655 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
656 EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
/* 0x1700: thermal-assist (TAU) interrupt; TAUException falls back to
   unknown_async_exception when CONFIG_TAU_INT is not set (see #define above). */
657 EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
658 EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
659 EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
660 EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
661 EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
662 EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
663 EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
664 EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
665 EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
666 EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
667 EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
668 EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
669 EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
670 EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
671 EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
672 EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
673 EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
674 EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
675 EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
676 EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
677 EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
678 EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
679 EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
680 EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
681 EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)
686 EXC_XFER_STD(0x200, machine_check_exception)
688 alignment_exception_tramp:
689 EXC_XFER_STD(0x600, alignment_exception)
691 handle_page_fault_tramp_1:
692 #ifdef CONFIG_VMAP_STACK
693 EXCEPTION_PROLOG_2 handle_dar_dsisr=1
697 handle_page_fault_tramp_2:
698 andis. r0, r5, DSISR_DABRMATCH@h
700 EXC_XFER_LITE(0x300, handle_page_fault)
701 1: EXC_XFER_STD(0x300, do_break)
703 #ifdef CONFIG_VMAP_STACK
704 #ifdef CONFIG_PPC_BOOK3S_604
705 .macro save_regs_thread thread
706 stw r0, THR0(\thread)
707 stw r3, THR3(\thread)
708 stw r4, THR4(\thread)
709 stw r5, THR5(\thread)
710 stw r6, THR6(\thread)
711 stw r8, THR8(\thread)
712 stw r9, THR9(\thread)
714 stw r0, THLR(\thread)
716 stw r0, THCTR(\thread)
719 .macro restore_regs_thread thread
720 lwz r0, THLR(\thread)
722 lwz r0, THCTR(\thread)
724 lwz r0, THR0(\thread)
725 lwz r3, THR3(\thread)
726 lwz r4, THR4(\thread)
727 lwz r5, THR5(\thread)
728 lwz r6, THR6(\thread)
729 lwz r8, THR8(\thread)
730 lwz r9, THR9(\thread)
739 rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
741 mfspr r10, SPRN_SPRG_THREAD
742 restore_regs_thread r10
743 b .Lhash_page_dsi_cont
747 mfspr r10, SPRN_SPRG_THREAD
753 mfspr r10, SPRN_SPRG_THREAD
754 restore_regs_thread r10
756 b .Lhash_page_isi_cont
758 .globl fast_hash_page_return
759 fast_hash_page_return:
760 andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
761 mfspr r10, SPRN_SPRG_THREAD
762 restore_regs_thread r10
768 mfspr r10, SPRN_SPRG_SCRATCH2
773 mfspr r11, SPRN_SPRG_SCRATCH1
774 mfspr r10, SPRN_SPRG_SCRATCH0
776 #endif /* CONFIG_PPC_BOOK3S_604 */
779 vmap_stack_overflow_exception
784 #ifdef CONFIG_ALTIVEC
786 bl load_up_altivec /* if from user, just load it up */
787 b fast_exception_return
788 #endif /* CONFIG_ALTIVEC */
789 1: addi r3,r1,STACK_FRAME_OVERHEAD
790 EXC_XFER_LITE(0xf20, altivec_unavailable_exception)
794 addi r3,r1,STACK_FRAME_OVERHEAD
795 EXC_XFER_STD(0xf00, performance_monitor_exception)
799 * This code is jumped to from the startup code to copy
800 * the kernel image to physical address PHYSICAL_START.
803 addis r9,r26,klimit@ha /* fetch klimit */
805 addis r25,r25,-KERNELBASE@h
806 lis r3,PHYSICAL_START@h /* Destination base address */
807 li r6,0 /* Destination offset */
808 li r5,0x4000 /* # bytes of memory to copy */
809 bl copy_and_flush /* copy the first 0x4000 bytes */
810 addi r0,r3,4f@l /* jump to the address of 4f */
811 mtctr r0 /* in copy and do the rest. */
812 bctr /* jump to the copy */
814 bl copy_and_flush /* copy the rest */
818 * Copy routine used to copy the kernel to start at physical address 0
819 * and flush and invalidate the caches as needed.
820 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
821 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
823 _ENTRY(copy_and_flush)
826 4: li r0,L1_CACHE_BYTES/4
828 3: addi r6,r6,4 /* copy a cache line */
832 dcbst r6,r3 /* write it to memory */
834 icbi r6,r3 /* flush the icache line */
837 sync /* additional sync needed on g4 */
844 .globl __secondary_start_mpc86xx
845 __secondary_start_mpc86xx:
847 stw r3, __secondary_hold_acknowledge@l(0)
848 mr r24, r3 /* cpu # */
851 .globl __secondary_start_pmac_0
852 __secondary_start_pmac_0:
853 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
862 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
863 set to map the 0xf0000000 - 0xffffffff region */
865 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
869 .globl __secondary_start
871 /* Copy some CPU settings from CPU 0 */
872 bl __restore_cpu_setup
876 bl call_setup_cpu /* Call setup_cpu for this CPU */
880 /* get current's stack and current */
881 lis r2,secondary_current@ha
883 lwz r2,secondary_current@l(r2)
885 lwz r1,TASK_STACK(r1)
888 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
893 /* load up the MMU */
894 bl load_segment_registers
897 /* ptr to phys current thread */
899 addi r4,r4,THREAD /* phys address of our thread_struct */
900 mtspr SPRN_SPRG_THREAD,r4
901 BEGIN_MMU_FTR_SECTION
902 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
903 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
904 rlwinm r4, r4, 4, 0xffff01ff
906 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
908 /* enable MMU and jump to start_secondary */
910 lis r3,start_secondary@h
911 ori r3,r3,start_secondary@l
915 #endif /* CONFIG_SMP */
917 #ifdef CONFIG_KVM_BOOK3S_HANDLER
918 #include "../kvm/book3s_rmhandlers.S"
922 * Load stuff into the MMU. Intended to be called with
926 sync /* Force all PTE updates to finish */
928 tlbia /* Clear all TLB entries */
929 sync /* wait for tlbia/tlbie to finish */
930 TLBSYNC /* ... on all CPUs */
931 /* Load the SDR1 register (hash table base & size) */
932 lis r6, early_hash - PAGE_OFFSET@h
933 ori r6, r6, 3 /* 256kB table */
938 sync /* Force all PTE updates to finish */
940 tlbia /* Clear all TLB entries */
941 sync /* wait for tlbia/tlbie to finish */
942 TLBSYNC /* ... on all CPUs */
943 BEGIN_MMU_FTR_SECTION
944 /* Load the SDR1 register (hash table base & size) */
949 END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
951 /* Load the BAT registers with the values set up by MMU_init. */
959 BEGIN_MMU_FTR_SECTION
964 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
967 _GLOBAL(load_segment_registers)
968 li r0, NUM_USER_SEGMENTS /* load up user segment register values */
969 mtctr r0 /* for context 0 */
970 li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
971 #ifdef CONFIG_PPC_KUEP
972 oris r3, r3, SR_NX@h /* Set Nx */
974 #ifdef CONFIG_PPC_KUAP
975 oris r3, r3, SR_KS@h /* Set Ks */
979 addi r3, r3, 0x111 /* increment VSID */
980 addis r4, r4, 0x1000 /* address of next segment */
982 li r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
983 mtctr r0 /* for context 0 */
984 rlwinm r3, r3, 0, ~SR_NX /* Nx = 0 */
985 rlwinm r3, r3, 0, ~SR_KS /* Ks = 0 */
986 oris r3, r3, SR_KP@h /* Kp = 1 */
988 addi r3, r3, 0x111 /* increment VSID */
989 addis r4, r4, 0x1000 /* address of next segment */
994 * This is where the main kernel code starts.
999 ori r2,r2,init_task@l
1000 /* Set up for using our exception vectors */
1001 /* ptr to phys current thread */
1003 addi r4,r4,THREAD /* init task's THREAD */
1004 mtspr SPRN_SPRG_THREAD,r4
1005 BEGIN_MMU_FTR_SECTION
1006 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
1007 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
1008 rlwinm r4, r4, 4, 0xffff01ff
1010 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
1013 lis r1,init_thread_union@ha
1014 addi r1,r1,init_thread_union@l
1016 stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
1018 * Do early platform-specific initialization,
1019 * and set up the MMU.
1029 bl MMU_init_hw_patch
1032 * Go back to running unmapped so we can load up new values
1033 * for SDR1 (hash table pointer) and the segment registers
1034 * and change to using our exception vectors.
1039 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1045 /* Load up the kernel context */
1048 #ifdef CONFIG_BDI_SWITCH
1049 /* Add helper information for the Abatron bdiGDB debugger.
1050 * We do this here because we know the mmu is disabled, and
1051 * will be enabled for real in just a few instructions.
1053 lis r5, abatron_pteptrs@h
1054 ori r5, r5, abatron_pteptrs@l
1055 stw r5, 0xf0(0) /* This must match your Abatron config */
1056 lis r6, swapper_pg_dir@h
1057 ori r6, r6, swapper_pg_dir@l
1060 #endif /* CONFIG_BDI_SWITCH */
1062 /* Now turn on the MMU for real! */
1064 lis r3,start_kernel@h
1065 ori r3,r3,start_kernel@l
1071 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
1073 * Set up the segment registers for a new context.
1075 _ENTRY(switch_mmu_context)
1076 lwz r3,MMCONTEXTID(r4)
1079 mulli r3,r3,897 /* multiply context by skew factor */
1080 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
1081 #ifdef CONFIG_PPC_KUEP
1082 oris r3, r3, SR_NX@h /* Set Nx */
1084 #ifdef CONFIG_PPC_KUAP
1085 oris r3, r3, SR_KS@h /* Set Ks */
1087 li r0,NUM_USER_SEGMENTS
1090 #ifdef CONFIG_BDI_SWITCH
1091 /* Context switch the PTE pointer for the Abatron BDI2000.
1092 * The PGDIR is passed as second argument.
1095 lis r5, abatron_pteptrs@ha
1096 stw r4, abatron_pteptrs@l + 0x4(r5)
1098 BEGIN_MMU_FTR_SECTION
1099 #ifndef CONFIG_BDI_SWITCH
1103 rlwinm r4, r4, 4, 0xffff01ff
1105 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
1110 addi r3,r3,0x111 /* next VSID */
1111 rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
1112 addis r4,r4,0x1000 /* address of next segment */
1118 EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
1120 EXPORT_SYMBOL(switch_mmu_context)
1123 * An undocumented "feature" of 604e requires that the v bit
1124 * be cleared before changing BAT values.
1126 * Also, newer IBM firmware does not clear bat3 and 4 so
1127 * this makes sure it's done.
1133 mtspr SPRN_DBAT0U,r10
1134 mtspr SPRN_DBAT0L,r10
1135 mtspr SPRN_DBAT1U,r10
1136 mtspr SPRN_DBAT1L,r10
1137 mtspr SPRN_DBAT2U,r10
1138 mtspr SPRN_DBAT2L,r10
1139 mtspr SPRN_DBAT3U,r10
1140 mtspr SPRN_DBAT3L,r10
1141 mtspr SPRN_IBAT0U,r10
1142 mtspr SPRN_IBAT0L,r10
1143 mtspr SPRN_IBAT1U,r10
1144 mtspr SPRN_IBAT1L,r10
1145 mtspr SPRN_IBAT2U,r10
1146 mtspr SPRN_IBAT2L,r10
1147 mtspr SPRN_IBAT3U,r10
1148 mtspr SPRN_IBAT3L,r10
1149 BEGIN_MMU_FTR_SECTION
1150 /* Here's a tweak: at this point, CPU setup has
1151 * not been called yet, so HIGH_BAT_EN may not be
1152 * set in HID0 for the 745x processors. However, it
1153 * seems that doesn't affect our ability to actually
1154 * write to these SPRs.
1156 mtspr SPRN_DBAT4U,r10
1157 mtspr SPRN_DBAT4L,r10
1158 mtspr SPRN_DBAT5U,r10
1159 mtspr SPRN_DBAT5L,r10
1160 mtspr SPRN_DBAT6U,r10
1161 mtspr SPRN_DBAT6L,r10
1162 mtspr SPRN_DBAT7U,r10
1163 mtspr SPRN_DBAT7L,r10
1164 mtspr SPRN_IBAT4U,r10
1165 mtspr SPRN_IBAT4L,r10
1166 mtspr SPRN_IBAT5U,r10
1167 mtspr SPRN_IBAT5L,r10
1168 mtspr SPRN_IBAT6U,r10
1169 mtspr SPRN_IBAT6L,r10
1170 mtspr SPRN_IBAT7U,r10
1171 mtspr SPRN_IBAT7L,r10
1172 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1181 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
1182 rlwinm r0, r6, 0, ~MSR_RI
1183 rlwinm r0, r0, 0, ~MSR_EE
1194 LOAD_BAT(0, r3, r4, r5)
1195 LOAD_BAT(1, r3, r4, r5)
1196 LOAD_BAT(2, r3, r4, r5)
1197 LOAD_BAT(3, r3, r4, r5)
1198 BEGIN_MMU_FTR_SECTION
1199 LOAD_BAT(4, r3, r4, r5)
1200 LOAD_BAT(5, r3, r4, r5)
1201 LOAD_BAT(6, r3, r4, r5)
1202 LOAD_BAT(7, r3, r4, r5)
1203 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
1204 li r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
1212 1: addic. r10, r10, -0x1000
1219 addi r4, r3, __after_mmu_off - _start
1221 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
1231 /* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
1233 lis r11,PAGE_OFFSET@h
1236 ori r8,r8,0x12 /* R/W access, M=1 */
1238 ori r8,r8,2 /* R/W access */
1239 #endif /* CONFIG_SMP */
1240 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
1242 mtspr SPRN_DBAT0L,r8 /* N.B. 6xx have valid */
1243 mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
1244 mtspr SPRN_IBAT0L,r8
1245 mtspr SPRN_IBAT0U,r11
1249 #ifdef CONFIG_BOOTX_TEXT
1252 * setup the display bat prepared for us in prom.c
1257 addis r8,r3,disp_BAT@ha
1258 addi r8,r8,disp_BAT@l
1263 mtspr SPRN_DBAT3L,r8
1264 mtspr SPRN_DBAT3U,r11
1266 #endif /* CONFIG_BOOTX_TEXT */
1268 #ifdef CONFIG_PPC_EARLY_DEBUG_CPM
1272 mtspr SPRN_DBAT1L, r8
1275 ori r11, r11, (BL_1M << 2) | 2
1276 mtspr SPRN_DBAT1U, r11
1281 #ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
1283 /* prepare a BAT for early io */
1284 #if defined(CONFIG_GAMECUBE)
1286 #elif defined(CONFIG_WII)
1289 #error Invalid platform for USB Gecko based early debugging.
1292 * The virtual address used must match the virtual address
1293 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
1295 lis r11, 0xfffe /* top 128K */
1296 ori r8, r8, 0x002a /* uncached, guarded ,rw */
1297 ori r11, r11, 0x2 /* 128K, Vs=1, Vp=0 */
1298 mtspr SPRN_DBAT1L, r8
1299 mtspr SPRN_DBAT1U, r11
1304 /* Jump into the system reset for the rom.
1305 * We first disable the MMU, and then jump to the ROM reset address.
1307 * r3 is the board info structure, r4 is the location for starting.
1308 * I use this for building a small kernel that can load other kernels,
1309 * rather than trying to write or rely on a rom monitor that can tftp load.
1314 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
1318 mfspr r11, SPRN_HID0
1320 ori r10,r10,HID0_ICE|HID0_DCE
1322 mtspr SPRN_HID0, r11
1324 li r5, MSR_ME|MSR_RI
1326 addis r6,r6,-KERNELBASE@h
1340 * We put a few things here that have to be page-aligned.
1341 * This stuff goes at the beginning of the data segment,
1342 * which is page-aligned.
1347 .globl empty_zero_page
1350 EXPORT_SYMBOL(empty_zero_page)
1352 .globl swapper_pg_dir
1354 .space PGD_TABLE_SIZE
1356 /* Room for two PTE pointers, usually the kernel and current user pointers
1357 * to their respective root page table.