/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

COMPAT_SYS_CALL_TABLE:
	.tc compat_sys_call_table[TC],compat_sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
	.globl system_call_common
system_call_common:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG)	/* transaction active? */
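	/*
	 * extrdi. above isolates the single MSR_TS_T ("transaction
	 * active") bit into the low bit of r10; its record form also
	 * sets CR0 so the result can be branched on directly.
	 */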
END_FTR_SECTION_IFSET(CPU_FTR_TM)
	addi	r1,r1,-INT_FRAME_SIZE
	beq	2f			/* if from kernel mode */
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
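	/*
	 * rldimi rotates r11 left by 28 and inserts it into r2 under a
	 * one-bit mask: with SH=28 and MB=63-28 the mask covers only
	 * big-endian bit 35 (bit 28 counting from the LSB), i.e. CR0.SO
	 * in the mfcr image; every other bit of r2 is preserved.
	 */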
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	bl	accumulate_stolen_time
	addi	r9,r1,STACK_FRAME_OVERHEAD
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that the
	 * condition is correct.
	 */
#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
	lbz	r10,PACAIRQSOFTMASK(r13)
1:	tdnei	r10,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
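	/*
	 * The tdnei/EMIT_BUG_ENTRY pair above is the assembly analogue
	 * of WARN_ON(): tdnei traps iff r10 != IRQS_ENABLED, and the
	 * bug-table entry (BUGFLAG_WARNING) turns that trap into a
	 * warning rather than a panic.
	 */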
#ifdef CONFIG_PPC_BOOK3E
#endif /* CONFIG_PPC_BOOK3E */

system_call:			/* label this so stack traces look sane */
	/*
	 * We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful.
	 */
	CURRENT_THREAD_INFO(r11, r1)
	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
	bne	.Lsyscall_dotrace	/* does not return */
	cmpldi	0,r0,NR_syscalls
	bge-	.Lsyscall_enosys

	/*
	 * Need to vector to 32-bit or default sys_call_table here,
	 * based on caller's run-mode / personality.
	 */
	ld	r11,SYS_CALL_TABLE@toc(2)
	andis.	r10,r10,_TIF_32BIT@h
	ld	r11,COMPAT_SYS_CALL_TABLE@toc(2)

	/*
	 * Prevent the load of the handler below (based on the user-passed
	 * system call number) being speculatively executed until the test
	 * against NR_syscalls and branch to .Lsyscall_enosys above has
	 * committed.
	 */
	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
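	/*
	 * Indexed load: r11 holds the table base, so r0 must contain the
	 * syscall number scaled earlier to a byte offset (table entries
	 * are 8-byte pointers); r12 receives the handler address for
	 * this system call.
	 */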
	mtctr	r12
	bctrl			/* Call handler */

#ifdef CONFIG_DEBUG_RSEQ
	/* Check whether the syscall is issued inside a restartable sequence */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	rseq_syscall
#endif

	CURRENT_THREAD_INFO(r12, r1)

#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	.Lunrecov_restore
#endif

/*
 * This is a few instructions into the actual syscall exit path (which actually
 * starts at .Lsyscall_exit) to cater to kprobe blacklisting and to reduce the
 * number of visible symbols for profiling purposes.
 *
 * We can probe from system_call until this point as MSR_RI is set. But once it
 * is cleared below, we won't be able to take a trap.
 *
 * This is blacklisted from kprobes further below with _ASM_NOKPROBE_SYMBOL().
 */
system_call_exit:
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	/*
	 * For performance reasons we clear RI the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (eg syscall_exit_work).
	 */
#endif /* CONFIG_PPC_BOOK3E */

	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	.Lsyscall_exit_work

#ifdef CONFIG_ALTIVEC
	andis.	r0,r8,MSR_VEC@h
#endif
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S
	mtmsrd	r10,1		/* Restore RI */
#endif
#ifdef CONFIG_PPC_BOOK3S
#endif

.Lsyscall_error_cont:
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
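	/*
	 * The dummy stdcx. to our own stack drops any lwarx/ldarx
	 * reservation the interrupted context may still hold, so a later
	 * stcx. in userspace cannot succeed spuriously. CPUs that track
	 * the reservation address (CPU_FTR_STCX_CHECKS_ADDRESS) do not
	 * need this store and skip it.
	 */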
	ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	std	r8, PACATMSCRATCH(r13)
#endif
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
	b	.	/* prevent speculative execution */

	b	.	/* prevent speculative execution */

.Lsyscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	b	.Lsyscall_error_cont

/* Traced system call support */
.Lsyscall_dotrace:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter

	/*
	 * We use the return value of do_syscall_trace_enter() as the syscall
	 * number. If the syscall was rejected for any reason do_syscall_trace_enter()
	 * returns an invalid syscall number and the test below against
	 * NR_syscalls will fail.
	 */

	/* Restore argument registers just clobbered and/or possibly changed. */

	/* Repopulate r9 and r10 for the syscall path */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)

	cmpldi	r0,NR_syscalls

	/* Return code is already in r3 thanks to do_syscall_trace_enter() */

.Lsyscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	/*
	 * If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 * If TIF_NOERROR is set, just save r3 as it is.
	 */
	andi.	r0,r9,_TIF_RESTOREALL
0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
	andi.	r0,r9,_TIF_NOERROR
	oris	r5,r5,0x1000	/* Set SO bit in CR */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)

	/* Clear per-syscall TIF flags if any are set. */
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
BEGIN_FTR_SECTION
	lis	r3,DEFAULT_PPR@highest	/* Set default PPR */
	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
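	/*
	 * The lis/sldi pair above materializes the 64-bit DEFAULT_PPR
	 * constant: lis loads its top 16 bits (@highest) shifted up by
	 * 16, and the shift by 32 then moves them into the top halfword
	 * of r3, where the PPR priority field (IBM bits 11-13, per the
	 * comment above) lives.
	 */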
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#endif /* CONFIG_PPC_BOOK3E */

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Firstly we need to enable TM in the kernel */
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	b	.	/* prevent speculative execution */
#endif
_ASM_NOKPROBE_SYMBOL(system_call_common);
_ASM_NOKPROBE_SYMBOL(system_call_exit);

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
_ASM_NOKPROBE_SYMBOL(save_nvgprs);

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc32_swapcontext)
	bl	compat_sys_swapcontext

_GLOBAL(ppc64_swapcontext)

_GLOBAL(ppc_switch_endian)

_GLOBAL(ret_from_fork)

_GLOBAL(ret_from_kernel_thread)
#ifdef PPC64_ELF_ABI_v2
#endif

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE	\
1:	nop;			\
	patch_site 1b, patch__call_flush_count_cache

#define BCCTR_FLUSH	.long 0x4c400420

.global flush_count_cache
flush_count_cache:
	/* Save LR into r9 */
	mflr	r9

2:	nop
	patch_site 2b, patch__flush_count_cache_return
#else
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	std	r0,_NIP(r1)	/* Return to switch caller */
	std	r1,KSP(r3)	/* Set old stack pointer */

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__before_spin_lock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/*
	 * Cancel all explicit user streams as they will have no use after
	 * context switch and will stop the HW from creating streams itself.
	 */
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
#endif
	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */
#if defined(CONFIG_STACKPROTECTOR)
	ld	r6, TASK_CANARY(r6)
	std	r6, PACA_CANARY(r13)
#endif

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_MMU_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
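	/*
	 * clrrdi clears the low bits of an address, leaving just the
	 * segment number (ESID): the low 28 bits for 256MB segments,
	 * the low 40 bits for 1TB segments; the feature section above
	 * picks the granularity in use. Equal ESIDs in the comparison
	 * that follows mean the new stack is in the same segment as the
	 * current one, so no new SLB entry is needed.
	 */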
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
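	/*
	 * r0 is now the RB operand for slbmte: the new stack's ESID in
	 * the high bits, SLB_ESID_V marking the entry valid, and the
	 * bolted slot number (SLB_NUM_BOLTED-1) selecting which SLB
	 * entry to replace.
	 */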
BEGIN_MMU_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/*
	 * Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/*
	 * No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
BEGIN_FTR_SECTION
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S_64 */

	CURRENT_THREAD_INFO(r7, r8)	/* base of new stack */
	/*
	 * Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	 * because we don't need to leave the 288-byte ABI gap at the
	 * top of the kernel stack.
	 */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However the rq lock release on this CPU paired with
	 * the rq lock acquire on the new CPU before the stack becomes
	 * active on the new CPU, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	/* r3-r13 are destroyed -- Cort */

	/* convert old thread to its task_struct for return value */
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

_GLOBAL(ret_from_except)
	bne	ret_from_except_lite

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
#else
	addi	r3,r1,STACK_FRAME_OVERHEAD
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	bl	restore_interrupts
	b	ret_from_except_lite

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	/*
	 * Use a non-volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	bl	restore_interrupts
	addi	r3,r1,STACK_FRAME_OVERHEAD

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src: current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */

	/* Do real store operation to complete stdu */

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	andi.	r0,r0,IRQS_DISABLED

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and eventually loop */
	CURRENT_THREAD_INFO(r9, r1)
	andi.	r0,r4,_TIF_NEED_RESCHED

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupts, but we really should disable interrupts
	 * when we return from the interrupt so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */
	.globl fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts.
	 */
	lbz	r6,PACAIRQSOFTMASK(r13)
	andi.	r5,r5,IRQS_DISABLED
	bne	.Lrestore_irq_off

	/* We are enabling, were we already enabled? Yes, just return */
	andi.	r6,r6,IRQS_DISABLED

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	bne-	.Lrestore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move on. We will hard-enable as a side
	 * effect of irq tracing if necessary.
	 */
.Lrestore_no_replay:
	stb	r0,PACAIRQSOFTMASK(r13);

	/*
	 * Final return path. BookE is handled in a different file.
	 */
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	/*
	 * Some code paths such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	beq-	.Lunrecov_restore

	/*
	 * Clear RI before restoring r13. If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	std	r3, PACATMSCRATCH(r13)	/* Stash returned-to MSR */
#endif

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace; the value stored in the stack frame may belong to
	 * another CPU.
	 */
BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)

	b	.	/* prevent speculative execution */

1:	mtspr	SPRN_SRR1,r3

	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_BOOK3E */
	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen.
	 */
.Lrestore_irq_off:
	lbz	r7,PACAIRQHAPPENED(r13)
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)

#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
	/* The interrupt should not have soft enabled. */
	lbz	r7,PACAIRQSOFTMASK(r13)
1:	tdeqi	r7,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
.Lrestore_check_irq_replay:
	/*
	 * XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page.
	 */
	bl	__check_irq_replay
	beq	.Lrestore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it.
	 */

	/*
	 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
	 * to reconcile the IRQ state. Tracing is already accounted for.
	 */
	lbz	r4,PACAIRQHAPPENED(r13)
	ori	r4,r4,PACA_IRQ_HARD_DIS
	stb	r4,PACAIRQHAPPENED(r13)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD;
1:	cmpwi	cr0,r3,0xf00
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	performance_monitor_exception
1:	cmpwi	cr0,r3,0xe60
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
1:	cmpwi	cr0,r3,0x900
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
#ifdef CONFIG_PPC_DOORBELL
#ifdef CONFIG_PPC_BOOK3E
#endif /* CONFIG_PPC_BOOK3E */
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except	/* What else to do here? */

.Lunrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception

_ASM_NOKPROBE_SYMBOL(ret_from_except);
_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
_ASM_NOKPROBE_SYMBOL(resume_kernel);
_ASM_NOKPROBE_SYMBOL(fast_exc_return_irq);
_ASM_NOKPROBE_SYMBOL(restore);
_ASM_NOKPROBE_SYMBOL(fast_exception_return);
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/*
	 * Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	/*
	 * Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */

#ifdef CONFIG_BUG
	/*
	 * There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON.
	 */
	lbz	r0,PACAIRQSOFTMASK(r13)
1:	tdeqi	r0,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */

	/*
	 * Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
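	/*
	 * Kernel text lives in the 0xc... linear mapping, so clearing
	 * the top two bits of the effective address yields the
	 * corresponding real address, which is what we must use once
	 * translation is off.
	 */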
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
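	/*
	 * r9 ends up as a mask of MSR bits that must be off while RTAS
	 * runs: rldicr rotates r9's low bit up into the SF (64-bit mode)
	 * position and clears every bit below it, and the ori adds the
	 * relocation (IR/DR), FP, RI and LE bits.
	 */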
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	b	.	/* prevent speculative execution */

rtas_return_loc:
	/* Clear RI and set SF before anything. */
	li	r0,MSR_RI
	sldi	r0,r0,(MSR_SF_LG - MSR_RI_LG)
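	/*
	 * r0 holds the single MSR_RI bit; shifting it left by the
	 * difference between the SF and RI bit positions turns it into
	 * the MSR_SF bit, so the same register serves first to clear RI
	 * and then to set SF.
	 */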
	/* relocation is off at this point */
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	b	.	/* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)

1:	.8byte	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	addi	r1,r1,SWITCH_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/*
	 * Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
	 */

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
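	/*
	 * r4 now holds the runtime address of local label 0 (via mflr),
	 * so adding the assembled offset (1f - 0b) produces the absolute
	 * address of label 1, keeping the trampoline return address
	 * position-independent.
	 */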
	/* Prepare a 32-bit mode big endian MSR */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
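	/*
	 * The rlwinm keeps only bits 1-31 of the low word (and clears
	 * the upper word), dropping word bit 0: on Book3E that is the
	 * MSR[CM] computation-mode bit, which switches us to 32-bit
	 * mode.
	 */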
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */

	/*
	 * Just make sure that r1 top 32 bits didn't get
	 * corrupted by OF.
	 */

	/* Restore the MSR (back to 64 bits) */

	/* Restore other registers */

	addi	r1,r1,SWITCH_FRAME_SIZE