/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>
.section ".toc","aw"
SYS_CALL_TABLE:
.tc sys_call_table[TC],sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
.tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

.section ".text"
.globl system_call_common
system_call_common:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
bne tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
addi r1,r1,-INT_FRAME_SIZE
beq 2f /* if from kernel mode */
ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
/*
 * This clears CR0.SO (bit 28), which is the error indication on
 * return from this system call.
 */
rldimi r12,r11,28,(63-28)
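/*
 * Note: r11 is assumed to hold zero and r12 the CR image saved by
 * mfcr (elided above); rldimi deposits that zero into the SO bit of
 * the saved CR, so the syscall starts out with no error indicated.
 */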
addi r9,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
std r11,-16(r9) /* "regshere" marker */
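/*
 * STACK_FRAME_REGS_MARKER spells "regshere"; stack unwinders look for
 * this marker to recognise an exception/syscall register frame.
 */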
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
beq 33f
/* if from user, see if there are any DTL entries to process */
ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
ld r11,PACA_DTL_RIDX(r13) /* get log read index */
addi r10,r10,LPPACA_DTLIDX
LDX_BE r10,0,r10 /* get log write index */
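/*
 * The dispatch trace log write index from the VPA is compared with
 * our cached read index below; if the hypervisor has logged new
 * dispatch entries, accumulate_stolen_time() folds them into the
 * stolen-time accounting before this entry is charged to the task.
 */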
cmpd cr1,r11,r10
beq+ cr1,33f
bl accumulate_stolen_time
addi r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
/*
 * A syscall should always be called with interrupts enabled
 * so we just unconditionally hard-enable here. When some kind
 * of irq tracing is used, we additionally check that condition
 * is correct.
 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
lbz r10,PACASOFTIRQEN(r13)
xori r10,r10,1
1: tdnei r10,0
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
li r11,MSR_RI
ori r11,r11,MSR_EE
mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */

/* We do need to set SOFTE in the stack frame or the return
 * from interrupt will be painful
 */
li r10,1
std r10,SOFTE(r1)

CURRENT_THREAD_INFO(r11, r1)
ld r10,TI_FLAGS(r11)
andi. r11,r10,_TIF_SYSCALL_DOTRACE
bne syscall_dotrace /* does not return */
cmpldi 0,r0,NR_syscalls
bge- syscall_enosys
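/*
 * r0 holds the syscall number; anything >= NR_syscalls is rejected
 * via the syscall_enosys path, which simply returns -ENOSYS.
 */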
system_call: /* label this so stack traces look sane */
/*
 * Need to vector to the 32-bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
ld r11,SYS_CALL_TABLE@toc(2)
andi. r10,r10,_TIF_32BIT
beq 15f
addi r11,r11,8 /* use 32-bit syscall entries */

15:
slwi r0,r0,4
ldx r12,r11,r0 /* Fetch system call handler [ptr] */
mtctr r12
bctrl /* Call handler */
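/*
 * In this kernel's sys_call_table layout each syscall has a pair of
 * entries (64-bit and 32-bit handler), so the number is scaled by 16
 * and the compat column sits 8 bytes in. The handler's return value
 * comes back in r3 and is post-processed at .Lsyscall_exit.
 */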
.Lsyscall_exit:
std r3,RESULT(r1)
CURRENT_THREAD_INFO(r12, r1)

ld r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
/* No MSR:RI on BookE */
andi. r10,r8,MSR_RI
beq- unrecoverable_exception
#endif

/*
 * Disable interrupts so current_thread_info()->flags can't change,
 * and so that we don't get interrupted after loading SRR0/1.
 */
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
/*
 * For performance reasons we clear RI the same time that we
 * clear EE. We only need to clear RI just before we restore r13
 * below, but batching it with EE saves us one expensive mtmsrd call.
 * We have to be careful to restore RI if we branch anywhere from
 * here (e.g. syscall_exit_work).
 */
li r11,0
mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */
ld r9,TI_FLAGS(r12)
li r11,-MAX_ERRNO
andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
bne- syscall_exit_work
/* If MSR_FP and MSR_VEC are set in user msr, then no need to restore */
li r7,MSR_FP
#ifdef CONFIG_ALTIVEC
oris r7,r7,MSR_VEC@h
#endif
and r0,r8,r7
cmpd r0,r7
bne syscall_restore_math
.Lsyscall_restore_math_cont:
.Lsyscall_error_cont:
ld r7,_NIP(r1)
BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
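/*
 * The stdcx. is patched out on CPUs with CPU_FTR_STCX_CHECKS_ADDRESS:
 * when the core tracks the reservation address, a stale user
 * reservation cannot pair with a later kernel stdcx., so the explicit
 * clear can be skipped.
 */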
ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
BEGIN_FTR_SECTION
HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

ld r13,GPR13(r1) /* only restore r13 if returning to usermode */

b . /* prevent speculative execution */

syscall_error:
oris r5,r5,0x1000 /* Set SO bit in CR */
neg r3,r3
std r5,_CCR(r1)
b .Lsyscall_error_cont
syscall_restore_math:
/*
 * Some initial tests from restore_math to avoid the heavyweight
 * C code entry and MSR manipulations.
 */
LOAD_REG_IMMEDIATE(r0, MSR_TS_MASK)
and. r0,r0,r8
bne 1f

ld r7,PACACURRENT(r13)
lbz r0,THREAD+THREAD_LOAD_FP(r7)
#ifdef CONFIG_ALTIVEC
lbz r6,THREAD+THREAD_LOAD_VEC(r7)
add r0,r0,r6
#endif
cmpdi r0,0
beq .Lsyscall_restore_math_cont
1: addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S
li r10,MSR_RI
mtmsrd r10,1 /* Restore RI */
#endif
bl restore_math
#ifdef CONFIG_PPC_BOOK3S
li r11,0
mtmsrd r11,1
#endif
/* Restore volatiles, reload MSR from updated one */
ld r8,_MSR(r1)
ld r3,RESULT(r1)
li r11,-MAX_ERRNO
b .Lsyscall_restore_math_cont
/* Traced system call support */
syscall_dotrace:
bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_syscall_trace_enter

/*
 * We use the return value of do_syscall_trace_enter() as the syscall
 * number. If the syscall was rejected for any reason, do_syscall_trace_enter()
 * returns an invalid syscall number and the test below against
 * NR_syscalls will fail.
 */
mr r0,r3

/* Restore argument registers just clobbered and/or possibly changed. */
ld r3,GPR3(r1)
ld r4,GPR4(r1)
ld r5,GPR5(r1)
ld r6,GPR6(r1)
ld r7,GPR7(r1)
ld r8,GPR8(r1)

/* Repopulate r9 and r10 for the system_call path */
addi r9,r1,STACK_FRAME_OVERHEAD
CURRENT_THREAD_INFO(r10, r1)
ld r10,TI_FLAGS(r10)

cmpldi r0,NR_syscalls
blt+ system_call

/* Return code is already in r3 thanks to do_syscall_trace_enter() */
b .Lsyscall_exit
syscall_enosys:
li r3,-ENOSYS
b .Lsyscall_exit

syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
li r10,MSR_RI
mtmsrd r10,1 /* Restore RI */
#endif
/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
 * If TIF_NOERROR is set, just save r3 as it is. */

andi. r0,r9,_TIF_RESTOREALL
beq+ 0f
REST_NVGPRS(r1)
b 2f

0: cmpld r3,r11 /* r11 is -MAX_ERRNO */
blt+ 1f
andi. r0,r9,_TIF_NOERROR
bne- 1f
ld r5,_CCR(r1)
neg r3,r3
oris r5,r5,0x1000 /* Set SO bit in CR */
std r5,_CCR(r1)
1: std r3,GPR3(r1)
2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
beq 4f
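/*
 * Error convention: handlers return -errno in r3. Values in the
 * [-MAX_ERRNO, -1] range are errors: r3 is negated to a positive
 * errno and the SO bit is set in the saved CR0 so userspace (libc)
 * can tell an error return from a large success value.
 */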
/* Clear per-syscall TIF flags if any are set. */

li r11,_TIF_PERSYSCALL_MASK
addi r12,r12,TI_FLAGS
3: ldarx r10,0,r12
andc r10,r10,r11
stdcx. r10,0,r12
bne- 3b
subi r12,r12,TI_FLAGS
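/*
 * The flag clear above uses an ldarx/andc/stdcx. loop: TI_FLAGS must
 * be updated atomically because other code may set flags concurrently,
 * so the store is retried until no intervening update occurs.
 */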
4: /* Anything else left to do? */
BEGIN_FTR_SECTION
lis r3,INIT_PPR@highest /* Set thread.ppr = 3 */
ld r10,PACACURRENT(r13)
sldi r3,r3,32 /* bits 11-13 are used for ppr */
std r3,TASKTHREADPPR(r10)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
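/*
 * PPR is the Program Priority Register; on CPUs with CPU_FTR_HAS_PPR
 * the saved thread priority is reset to medium (3) on the way back
 * to userspace, in case the kernel lowered it.
 */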
andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
beq ret_from_except_lite

/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
wrteei 1
#else
li r10,MSR_RI
ori r10,r10,MSR_EE
mtmsrd r10,1
#endif /* CONFIG_PPC_BOOK3E */

bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_syscall_trace_leave
b ret_from_except
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tabort_syscall:
/* Firstly we need to enable TM in the kernel */
mfmsr r10
li r9, 1
rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
mtmsrd r10, 0

/* tabort, this dooms the transaction, nothing else */
li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
TABORT(R9)
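/*
 * The abort cause ends up in TEXASR, so userspace can see why the
 * transaction failed: TM_CAUSE_SYSCALL identifies the reason and
 * TM_CAUSE_PERSISTENT tells it that simply retrying cannot succeed.
 */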
/*
 * Return directly to userspace. We have corrupted user register state,
 * but userspace will never see that register state. Execution will
 * resume after the tbegin of the aborted transaction with the
 * checkpointed register state.
 */

b . /* prevent speculative execution */
#endif
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
ld r11,_TRAP(r1)
andi. r0,r11,1
beq- 1f
SAVE_NVGPRS(r1)
clrrdi r0,r11,1
std r0,_TRAP(r1)
1: blr
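/*
 * The low bit of the saved trap value flags a frame whose
 * non-volatile GPRs were not saved; clearing it (clrrdi above)
 * records that the full register set is now on the stack.
 */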
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */
_GLOBAL(ppc32_swapcontext)
bl save_nvgprs
bl compat_sys_swapcontext
b .Lsyscall_exit

_GLOBAL(ppc64_swapcontext)
bl save_nvgprs
bl sys_swapcontext
b .Lsyscall_exit

_GLOBAL(ppc_switch_endian)
bl save_nvgprs
bl sys_switch_endian
b .Lsyscall_exit

_GLOBAL(ret_from_fork)
bl schedule_tail
REST_NVGPRS(r1)
li r3,0
b .Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
bl schedule_tail
REST_NVGPRS(r1)
mtlr r14
mr r3,r15
#ifdef PPC64_ELF_ABI_v2
mr r12,r14
#endif
blrl
li r3,0
b .Lsyscall_exit
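/*
 * Under the ELFv2 ABI the global entry point expects r12 to hold the
 * function's own address (it derives its TOC pointer from r12), so
 * the thread-function pointer is copied there before the call.
 */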
/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
mflr r0
std r0,16(r1)
stdu r1,-SWITCH_FRAME_SIZE(r1)
/* r3-r13 are caller saved -- Cort */
SAVE_8GPRS(14, r1)
SAVE_10GPRS(22, r1)
std r0,_NIP(r1) /* Return to switch caller */
mfcr r23
std r23,_CCR(r1)
std r1,KSP(r3) /* Set old stack pointer */
#ifdef CONFIG_SMP
/* We need a sync somewhere here to make sure that if the
 * previous task gets rescheduled on another CPU, it sees all
 * stores it has performed on this one.
 */
sync
#endif /* CONFIG_SMP */

/*
 * The kernel context switch path must contain a spin_lock,
 * which contains larx/stcx, which will clear any reservation
 * of the task being switched.
 */
BEGIN_FTR_SECTION
/*
 * A cp_abort (copy paste abort) here ensures that when context switching, a
 * copy from one process can't leak into the paste of another.
 */
PPC_CP_ABORT
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself
 */
DCBT_STOP_ALL_STREAM_IDS(r6)
#endif
addi r6,r4,-THREAD /* Convert THREAD to 'current' */
std r6,PACACURRENT(r13) /* Set new 'current' */

ld r8,KSP(r4) /* new stack pointer */
#ifdef CONFIG_PPC_STD_MMU_64
BEGIN_MMU_FTR_SECTION
b 2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_MMU_FTR_SECTION
clrrdi r6,r8,28 /* get its ESID */
clrrdi r9,r1,28 /* get current sp ESID */
FTR_SECTION_ELSE
clrrdi r6,r8,40 /* get its 1T ESID */
clrrdi r9,r1,40 /* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
clrldi. r0,r6,2 /* is new ESID c00000000? */
cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */
cror eq,4*cr1+eq,eq
beq 2f /* if yes, don't slbie it */
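/*
 * No SLB update is needed if the new stack lives in the bolted
 * kernel segment (an ESID that is plain 0xC...0, always mapped) or
 * in the same segment as the old stack; in either case the branch
 * to 2f skips the slbie/slbmte sequence below.
 */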
/* Bolt in the new stack SLB entry */
ld r7,KSP_VSID(r4) /* Get new stack's VSID */
oris r0,r6,(SLB_ESID_V)@h
ori r0,r0,(SLB_NUM_BOLTED-1)@l

BEGIN_MMU_FTR_SECTION
li r9,MMU_SEGSIZE_1T /* insert B field */
oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
/* Update the last bolted SLB. No write barriers are needed
 * here, provided we only update the current CPU's SLB shadow
 * buffer.
 */
ld r9,PACA_SLBSHADOWPTR(r13)
li r12,0
std r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
li r12,SLBSHADOW_STACKVSID
STDX_BE r7,r12,r9 /* Save VSID */
li r12,SLBSHADOW_STACKESID
STDX_BE r0,r12,r9 /* Save ESID */
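/*
 * Ordering note: the stale ESID is zeroed before the new VSID and
 * ESID are written, so a hypervisor reading the shadow buffer never
 * sees a valid-looking entry with a mismatched VSID/ESID pair.
 */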
/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
 * we have 1TB segments, the only CPUs known to have the errata
 * only support less than 1TB of system memory and we'll never
 * actually hit this code path.
 */

isync
slbie r6
slbie r6 /* Workaround POWER5 < DD2.1 issue */
slbmte r7,r0
isync

2:
#endif /* CONFIG_PPC_STD_MMU_64 */
CURRENT_THREAD_INFO(r7, r8) /* base of new stack */
/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
 * because we don't need to leave the 288-byte ABI gap at the
 * top of the kernel stack.
 */
addi r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
/*
 * PMU interrupts in radix may come in here. They will use r1, not
 * PACAKSAVE, so this stack switch will not cause a problem. They
 * will store to the process stack, which may then be migrated to
 * another CPU. However the rq lock release on this CPU paired with
 * the rq lock acquire on the new CPU before the stack becomes
 * active on the new CPU, will order those stores.
 */
mr r1,r8 /* start using new stack pointer */
std r7,PACAKSAVE(r13)
/* r3-r13 are destroyed -- Cort */
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)

/* convert old thread to its task_struct for return value */
addi r3,r3,-THREAD
ld r7,_NIP(r1) /* Return to _switch caller in new task */
mtlr r7
addi r1,r1,SWITCH_FRAME_SIZE
blr
_GLOBAL(ret_from_except)
ld r11,_TRAP(r1)
andi. r0,r11,1
bne ret_from_except_lite
REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
/*
 * Disable interrupts so that current_thread_info()->flags
 * can't change between when we test it and when we return
 * from the interrupt.
 */
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
li r10,MSR_RI
mtmsrd r10,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
CURRENT_THREAD_INFO(r9, r1)
ld r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
ld r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
ld r4,TI_FLAGS(r9)
andi. r3,r3,MSR_PR
beq resume_kernel
#ifdef CONFIG_PPC_BOOK3E
lwz r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */
/* Check current_thread_info()->flags */
andi. r0,r4,_TIF_USER_WORK_MASK
bne 1f
#ifdef CONFIG_PPC_BOOK3E
/*
 * Check to see if the dbcr0 register is set up to debug.
 * Use the internal debug mode bit to do this.
 */
andis. r0,r3,DBCR0_IDM@h
beq restore
mfmsr r0
rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
mtmsr r0
mtspr SPRN_DBCR0,r3
li r10, -1
mtspr SPRN_DBSR,r10
b restore
#else
addi r3,r1,STACK_FRAME_OVERHEAD
bl restore_math
b restore
#endif
1: andi. r0,r4,_TIF_NEED_RESCHED
beq 2f
bl restore_interrupts
SCHEDULE_USER
b ret_from_except_lite
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
bne 3f /* only restore TM if nothing else to do */
addi r3,r1,STACK_FRAME_OVERHEAD
bl restore_tm_state
b restore
3:
#endif
bl save_nvgprs
/*
 * Use a non-volatile GPR to save and restore our thread_info flags
 * across the call to restore_interrupts.
 */
mr r30,r4
bl restore_interrupts
mr r4,r30
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_notify_resume
b ret_from_except
resume_kernel:
/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
andis. r8,r4,_TIF_EMULATE_STACK_STORE@h
beq+ 1f

addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */

ld r3,GPR1(r1)
subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
mr r4,r1 /* src: current exception frame */
mr r1,r3 /* Reroute the trampoline frame to r1 */

/* Copy from the original to the trampoline. */
li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
li r6,0 /* start offset: 0 */
mtctr r5
3: ldx r0,r6,r4
stdx r0,r6,r3
addi r6,r6,8
bdnz 3b
/* Do real store operation to complete stdu */
ld r5,GPR1(r1)
std r8,0(r5)

/* Clear _TIF_EMULATE_STACK_STORE flag */
lis r11,_TIF_EMULATE_STACK_STORE@h
addi r5,r9,TI_FLAGS
0: ldarx r4,0,r5
andc r4,r4,r11
stdcx. r4,0,r5
bne- 0b
1:
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
andi. r0,r4,_TIF_NEED_RESCHED
beq+ restore
/* Check that preempt_count() == 0 and interrupts are enabled */
lwz r8,TI_PREEMPT(r9)
cmpwi cr1,r8,0
ld r0,SOFTE(r1)
cmpdi r0,0
crandc eq,cr1*4+eq,eq
bne restore
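/*
 * crandc computes cr0.eq = cr1.eq & ~cr0.eq: preemption proceeds only
 * when preempt_count() is zero (cr1.eq) and SOFTE is non-zero, i.e.
 * interrupts were soft-enabled (~cr0.eq); otherwise bne takes us
 * straight to restore.
 */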
/*
 * Here we are preempting the current task. We want to make
 * sure we are soft-disabled first and reconcile irq state.
 */
RECONCILE_IRQ_STATE(r3,r4)
1: bl preempt_schedule_irq

/* Re-test flags and eventually loop */
CURRENT_THREAD_INFO(r9, r1)
ld r4,TI_FLAGS(r9)
andi. r0,r4,_TIF_NEED_RESCHED
bne 1b
/*
 * arch_local_irq_restore() from preempt_schedule_irq above may
 * enable hard interrupts, but we really should have them disabled
 * when we return from the interrupt, so that we don't get
 * interrupted after loading SRR0/1.
 */
#ifdef CONFIG_PPC_BOOK3E
wrteei 0
#else
li r10,MSR_RI
mtmsrd r10,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */
.globl fast_exc_return_irq
fast_exc_return_irq:
restore:
/*
 * This is the main kernel exit path. First we check if we
 * are about to re-enable interrupts.
 */
ld r5,SOFTE(r1)
lbz r6,PACASOFTIRQEN(r13)
cmpwi cr0,r5,0
beq restore_irq_off
/* We are enabling, were we already enabled? Yes, just return */
cmpwi cr0,r6,1
beq cr0,do_restore
/*
 * We are about to soft-enable interrupts (we are hard disabled
 * at this point). We check if there's anything that needs to
 * be replayed first.
 */
lbz r0,PACAIRQHAPPENED(r13)
cmpwi cr0,r0,0
bne- restore_check_irq_replay
/*
 * Get here when nothing happened while soft-disabled, just
 * soft-enable and move on. We will hard-enable as a side
 * effect of irq restore.
 */
restore_no_replay:
TRACE_ENABLE_INTS
li r0,1
stb r0,PACASOFTIRQEN(r13);
/*
 * Final return path. BookE is handled in a different file.
 */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
b exception_return_book3e
#else
/*
 * Clear the reservation. If we know the CPU tracks the address of
 * the reservation then we can potentially save some cycles and use
 * a larx. On POWER6 and POWER7 this is significantly faster.
 */
BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
FTR_SECTION_ELSE
ldarx r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
/*
 * Some code paths such as load_up_fpu or altivec return directly
 * here. They run entirely hard disabled and do not alter the
 * interrupt state. They also don't use lwarx/stwcx. and thus
 * are known not to leave dangling reservations.
 */
.globl fast_exception_return
fast_exception_return:
ld r3,_MSR(r1)
/* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
ld r2,PACACURRENT(r13)
ld r2,TASKTHREADPPR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/*
 * Clear RI before restoring r13. If we are returning to
 * userspace and we take an exception after restoring r13,
 * we end up corrupting the userspace r13 value.
 */
li r4,0
mtmsrd r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* TM debug */
std r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
/*
 * r13 is our per cpu area; only restore it if we are returning to
 * userspace, since the value stored in the stack frame may belong to
 * another CPU.
 */
andi. r0,r3,MSR_PR
beq 1f
BEGIN_FTR_SECTION
mtspr SPRN_PPR,r2 /* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
REST_GPR(13, r1)
b . /* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */
/*
 * We are returning to a context with interrupts soft disabled.
 *
 * However, we may also be about to hard enable, so we need to
 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
 * or that bit can get out of sync and bad things will happen.
 */
restore_irq_off:
ld r3,_MSR(r1)
lbz r7,PACAIRQHAPPENED(r13)
andi. r0,r3,MSR_EE
beq 1f
rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
stb r7,PACAIRQHAPPENED(r13)
1: li r0,0
stb r0,PACASOFTIRQEN(r13);
TRACE_DISABLE_INTS
b do_restore
/*
 * Something did happen, check if a re-emit is needed
 * (this also clears paca->irq_happened)
 */
restore_check_irq_replay:
/* XXX: We could implement a fast path here where we check
 * for irq_happened being just 0x01, in which case we can
 * clear it and return. That means that we would potentially
 * miss a decrementer having wrapped all the way around.
 *
 * Still, this might be useful for things like hash_page
 */
bl __check_irq_replay
cmpwi cr0,r3,0
beq restore_no_replay
/*
 * We need to re-emit an interrupt. We do so by re-using our
 * existing exception frame. We first change the trap value,
 * but we need to ensure we preserve the low nibble of it.
 */
ld r4,_TRAP(r1)
clrldi r4,r4,60
or r4,r4,r3
std r4,_TRAP(r1)
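/*
 * In this kernel the low nibble of the trap word carries flags rather
 * than vector bits; in particular bit 0 set means the non-volatile
 * GPRs are not saved in the frame (see FULL_REGS()), so it must
 * survive the replayed vector number being ORed in.
 */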
/*
 * Then find the right handler and call it. Interrupts are
 * still soft-disabled and we keep them that way.
 */
cmpwi cr0,r3,0x500
bne 1f
addi r3,r1,STACK_FRAME_OVERHEAD;
bl do_IRQ
b ret_from_except
1: cmpwi cr0,r3,0xe60
bne 1f
addi r3,r1,STACK_FRAME_OVERHEAD;
bl handle_hmi_exception
b ret_from_except
1: cmpwi cr0,r3,0x900
bne 1f
addi r3,r1,STACK_FRAME_OVERHEAD;
bl timer_interrupt
b ret_from_except
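/*
 * __check_irq_replay returned the vector to replay in r3: 0x500 is an
 * external interrupt, 0xe60 a hypervisor maintenance interrupt and
 * 0x900 the decrementer; doorbells are handled just below.
 */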
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
cmpwi cr0,r3,0x280
#else
BEGIN_FTR_SECTION
cmpwi cr0,r3,0xe80
FTR_SECTION_ELSE
cmpwi cr0,r3,0xa00
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
#endif /* CONFIG_PPC_BOOK3E */
bne 1f
addi r3,r1,STACK_FRAME_OVERHEAD;
bl doorbell_exception
b ret_from_except
#endif /* CONFIG_PPC_DOORBELL */
1: b ret_from_except /* What else to do here? */
unrecov_restore:
addi r3,r1,STACK_FRAME_OVERHEAD
bl unrecoverable_exception
b unrecov_restore
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
mflr r0
std r0,16(r1)
stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */

/* Because RTAS is running in 32b mode, it clobbers the high order half
 * of all registers that it saves. We therefore save those registers
 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
 */
SAVE_GPR(2, r1) /* Save the TOC */
SAVE_GPR(13, r1) /* Save paca */
SAVE_8GPRS(14, r1) /* Save the non-volatiles */
SAVE_10GPRS(22, r1) /* ditto */
/* Temporary workaround to clear CR until RTAS can be modified to
 * ignore all bits.
 */
li r0,0
mtcr r0

#ifdef CONFIG_BUG
/* There is no way it is acceptable to get here with interrupts enabled,
 * check it with the asm equivalent of WARN_ON
 */
lbz r0,PACASOFTIRQEN(r13)
1: tdnei r0,0
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
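/*
 * This is the asm equivalent of WARN_ON: tdnei traps if r0 (the
 * soft-enable state) is non-zero, and EMIT_BUG_ENTRY tags that trap
 * site as a warning in the bug table rather than a fatal BUG.
 */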
/* Hard-disable interrupts */
mfmsr r6
rldicl r7,r6,48,1
rotldi r7,r7,16
mtmsrd r7,1
/* Unfortunately, the stack pointer and the MSR are also clobbered,
 * so they are saved in the PACA which allows us to restore
 * our original state after RTAS returns.
 */
std r1,PACAR1(r13)
std r6,PACASAVEDMSR(r13)
/* Setup our real return addr */
LOAD_REG_ADDR(r4,rtas_return_loc)
clrldi r4,r4,2 /* convert to realmode address */
mtlr r4

li r0,0
ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
andc r0,r6,r0

li r9,1
rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
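/*
 * Two masks are built here: r0 is the current MSR with EE/SE/BE/RI
 * cleared, for the trampoline itself, and r9 collects MSR_SF plus
 * the translation/FP bits so that andc below yields the 32-bit,
 * real-mode MSR that RTAS will actually run with.
 */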
andc r6,r0,r9

sync /* disable interrupts so SRR0/1 */
mtmsrd r0 /* don't get trashed */
LOAD_REG_ADDR(r4, rtas)
ld r5,RTASENTRY(r4) /* get the rtas->entry value */
ld r4,RTASBASE(r4) /* get the rtas->base value */

mtspr SPRN_SRR0,r5
mtspr SPRN_SRR1,r6
rfid
b . /* prevent speculative execution */
rtas_return_loc:
FIXUP_ENDIAN

/* relocation is off at this point */
GET_PACA(r4)
clrldi r4,r4,2 /* convert to realmode address */

bcl 20,31,$+4
0: mflr r3
ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
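/*
 * Classic position-independent load with relocation off: bcl 20,31,$+4
 * puts the address of the following instruction in LR, and the 1f-0b
 * displacement then indexes the .llong (label 1 below) holding the
 * virtual address of rtas_restore_regs.
 */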
mfmsr r6
li r0,MSR_RI
andc r6,r6,r0
sync
mtmsrd r6

ld r1,PACAR1(r4) /* Restore our SP */
ld r4,PACASAVEDMSR(r4) /* Restore our MSR */

mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4
rfid
b . /* prevent speculative execution */

.align 3
1: .llong rtas_restore_regs
rtas_restore_regs:
/* relocation is on at this point */
REST_GPR(2, r1) /* Restore the TOC */
REST_GPR(13, r1) /* Restore paca */
REST_8GPRS(14, r1) /* Restore the non-volatiles */
REST_10GPRS(22, r1) /* ditto */
GET_PACA(r13)

addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
ld r0,16(r1) /* get return address */

mtlr r0
blr /* return to caller */

#endif /* CONFIG_PPC_RTAS */
_GLOBAL(enter_prom)
mflr r0
std r0,16(r1)
stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */

/* Because PROM is running in 32b mode, it clobbers the high order half
 * of all registers that it saves. We therefore save those registers
 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
 */
/* Put PROM address in SRR0 */
mtsrr0 r4

/* Setup our trampoline return addr in LR */
bcl 20,31,$+4
0: mflr r4
addi r4,r4,(1f - 0b)
mtlr r4
/* Prepare a 32-bit mode big endian MSR
 */
#ifdef CONFIG_PPC_BOOK3E
rlwinm r11,r11,0,1,31
mtsrr1 r11
rfi
#else /* CONFIG_PPC_BOOK3E */
LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
andc r11,r11,r12
mtsrr1 r11
rfid
#endif /* CONFIG_PPC_BOOK3E */
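/*
 * r11 is assumed to hold the MSR image being edited (mfmsr, elided
 * above). On Book3E the rlwinm clears the upper word and MSR_CM,
 * dropping to 32-bit mode; on Book3S the andc clears MSR_SF/ISF/LE
 * for a 32-bit big-endian context before returning into firmware.
 */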
1: /* Return from OF */
FIXUP_ENDIAN

/* Just make sure that r1 top 32 bits didn't get
 * corrupted by OF
 */
rldicl r1,r1,0,32

/* Restore the MSR (back to 64 bits) */
ld r0,_MSR(r1)
MTMSRD(r0)
isync

/* Restore other registers */
REST_GPR(2, r1)
REST_GPR(13, r1)
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)

addi r1,r1,PROM_FRAME_SIZE