2 * Low-level system-call handling, trap handlers and context-switching
4 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
5 * Copyright (C) 2008-2009 PetaLogix
6 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
7 * Copyright (C) 2001,2002 NEC Corporation
8 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
10 * This file is subject to the terms and conditions of the GNU General
11 * Public License. See the file COPYING in the main directory of this
12 * archive for more details.
14 * Written by Miles Bader <miles@gnu.org>
15 * Heavily modified by John Williams for Microblaze
18 #include <linux/sys.h>
19 #include <linux/linkage.h>
21 #include <asm/entry.h>
22 #include <asm/current.h>
23 #include <asm/processor.h>
24 #include <asm/exceptions.h>
25 #include <asm/asm-offsets.h>
26 #include <asm/thread_info.h>
29 #include <asm/unistd.h>
31 #include <linux/errno.h>
32 #include <asm/signal.h>
36 /* The size of a state save frame. */
37 #define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
39 /* The offset of the struct pt_regs in a `state save frame' on the stack. */
40 #define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */
/* Emit a global, word-aligned entry label for a C-callable routine. */
42 #define C_ENTRY(name) .globl name; .align 4; name
45 * Various ways of setting and clearing BIP in flags reg.
46 * This is mucky, but necessary using microblaze version that
47 * allows msr ops to write to BIP
49 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
95 msrclr r11, MSR_VMS | MSR_UMS
102 andi r11, r11, ~MSR_BIP
110 ori r11, r11, MSR_BIP
118 andi r11, r11, ~MSR_EIP
134 andi r11, r11, ~MSR_IE
150 ori r11, r11, MSR_VMS
151 andni r11, r11, MSR_UMS
159 ori r11, r11, MSR_VMS
160 andni r11, r11, MSR_UMS
168 andni r11, r11, (MSR_VMS|MSR_UMS)
174 /* Define how to call high-level functions. With MMU, virtual mode must be
175 * enabled when calling the high-level function. Clobbers R11.
176 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
179 /* turn on virtual protected mode save */
186 /* turn off virtual protected mode save and user mode save*/
189 rted r0, TOPHYS(1f); \
194 swi r2, r1, PTO+PT_R2; /* Save SDA */ \
195 swi r3, r1, PTO+PT_R3; \
196 swi r4, r1, PTO+PT_R4; \
197 swi r5, r1, PTO+PT_R5; \
198 swi r6, r1, PTO+PT_R6; \
199 swi r7, r1, PTO+PT_R7; \
200 swi r8, r1, PTO+PT_R8; \
201 swi r9, r1, PTO+PT_R9; \
202 swi r10, r1, PTO+PT_R10; \
203 swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
204 swi r12, r1, PTO+PT_R12; \
205 swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
206 swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
207 swi r15, r1, PTO+PT_R15; /* Save LP */ \
208 swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
209 swi r19, r1, PTO+PT_R19; \
210 swi r20, r1, PTO+PT_R20; \
211 swi r21, r1, PTO+PT_R21; \
212 swi r22, r1, PTO+PT_R22; \
213 swi r23, r1, PTO+PT_R23; \
214 swi r24, r1, PTO+PT_R24; \
215 swi r25, r1, PTO+PT_R25; \
216 swi r26, r1, PTO+PT_R26; \
217 swi r27, r1, PTO+PT_R27; \
218 swi r28, r1, PTO+PT_R28; \
219 swi r29, r1, PTO+PT_R29; \
220 swi r30, r1, PTO+PT_R30; \
221 swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
222 mfs r11, rmsr; /* save MSR */ \
224 swi r11, r1, PTO+PT_MSR;
/* Reload MSR (into r11 first) and all GPRs from the pt_regs frame at r1+PTO.
 * Counterpart of the state-save sequence; r1 itself is restored by the caller
 * after this macro. NOTE(review): some interior lines of this macro are not
 * visible in this excerpt (stale numbering jumps 227->230). */
226 #define RESTORE_REGS \
227 lwi r11, r1, PTO+PT_MSR; \
230 lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
231 lwi r3, r1, PTO+PT_R3; \
232 lwi r4, r1, PTO+PT_R4; \
233 lwi r5, r1, PTO+PT_R5; \
234 lwi r6, r1, PTO+PT_R6; \
235 lwi r7, r1, PTO+PT_R7; \
236 lwi r8, r1, PTO+PT_R8; \
237 lwi r9, r1, PTO+PT_R9; \
238 lwi r10, r1, PTO+PT_R10; \
239 lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
240 lwi r12, r1, PTO+PT_R12; \
241 lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
242 lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
243 lwi r15, r1, PTO+PT_R15; /* restore LP */ \
244 lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
245 lwi r19, r1, PTO+PT_R19; \
246 lwi r20, r1, PTO+PT_R20; \
247 lwi r21, r1, PTO+PT_R21; \
248 lwi r22, r1, PTO+PT_R22; \
249 lwi r23, r1, PTO+PT_R23; \
250 lwi r24, r1, PTO+PT_R24; \
251 lwi r25, r1, PTO+PT_R25; \
252 lwi r26, r1, PTO+PT_R26; \
253 lwi r27, r1, PTO+PT_R27; \
254 lwi r28, r1, PTO+PT_R28; \
255 lwi r29, r1, PTO+PT_R29; \
256 lwi r30, r1, PTO+PT_R30; \
257 lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
264 * System calls are handled here.
267 * Syscall number in r12, args in r5-r10
270 * Trap entered via brki instruction, so BIP bit is set, and interrupts
271 * are masked. This is nice, means we don't have to CLI before state save
273 C_ENTRY(_user_exception):
274 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
275 addi r14, r14, 4 /* return address is 4 bytes after call */
282 /* Kernel-mode state save - kernel execve */
283 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
286 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
289 addi r11, r0, 1; /* Was in kernel-mode. */
290 swi r11, r1, PTO+PT_MODE; /* pt_regs -> kernel mode */
292 nop; /* Fill delay slot */
294 /* User-mode state save. */
296 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
298 lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
299 /* calculate kernel stack pointer from task struct 8k */
300 addik r1, r1, THREAD_SIZE;
303 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
306 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
307 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
308 swi r11, r1, PTO+PT_R1; /* Store user SP. */
309 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
310 /* Save away the syscall number. */
311 swi r12, r1, PTO+PT_R0;
314 /* where the trap should return need -8 to adjust for rtsd r15, 8*/
315 /* Jump to the appropriate function for the system call number in r12
316 * (r12 is not preserved), or return an error if r12 is not valid. The LP
317 * register should point to the location where
318 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
320 # Step into virtual mode.
326 lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
327 lwi r11, r11, TI_FLAGS /* get flags in thread info */
328 andi r11, r11, _TIF_WORK_SYSCALL_MASK
331 addik r3, r0, -ENOSYS
332 swi r3, r1, PTO + PT_R3
333 brlid r15, do_syscall_trace_enter
334 addik r5, r1, PTO + PT_R0
336 # do_syscall_trace_enter returns the new syscall nr.
338 lwi r5, r1, PTO+PT_R5;
339 lwi r6, r1, PTO+PT_R6;
340 lwi r7, r1, PTO+PT_R7;
341 lwi r8, r1, PTO+PT_R8;
342 lwi r9, r1, PTO+PT_R9;
343 lwi r10, r1, PTO+PT_R10;
345 /* Jump to the appropriate function for the system call number in r12
346 * (r12 is not preserved), or return an error if r12 is not valid.
347 * The LP register should point to the location where the called function
348 * should return. [note that MAKE_SYS_CALL uses label 1] */
349 /* See if the system call number is valid */
350 addi r11, r12, -__NR_syscalls;
352 /* Figure out which function to use for this system call. */
353 /* Note Microblaze barrel shift is optional, so don't rely on it */
354 add r12, r12, r12; /* convert num -> ptr */
358 /* Trace syscalls and store them to r0_ram */
359 lwi r3, r12, 0x400 + r0_ram
361 swi r3, r12, 0x400 + r0_ram
364 # Find and jump into the syscall handler.
365 lwi r12, r12, sys_call_table
366 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
367 addi r15, r0, ret_from_trap-8
370 /* The syscall number is invalid, return an error. */
372 addi r3, r0, -ENOSYS;
373 rtsd r15,8; /* looks like a normal subroutine return */
377 /* Entry point used to return from a syscall/trap */
378 /* We re-enable BIP bit before state restore */
379 C_ENTRY(ret_from_trap):
380 swi r3, r1, PTO + PT_R3
381 swi r4, r1, PTO + PT_R4
383 lwi r11, r1, PTO+PT_MODE;
384 /* See if returning to kernel mode, if so, skip resched &c. */
386 /* We're returning to user mode, so check for various conditions that
387 * trigger rescheduling. */
388 /* FIXME: Restructure all these flag checks. */
389 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
390 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
391 andi r11, r11, _TIF_WORK_SYSCALL_MASK
394 brlid r15, do_syscall_trace_leave
395 addik r5, r1, PTO + PT_R0
397 /* We're returning to user mode, so check for various conditions that
398 * trigger rescheduling. */
399 /* get thread info from current task */
400 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
401 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
402 andi r11, r11, _TIF_NEED_RESCHED;
405 bralid r15, schedule; /* Call scheduler */
406 nop; /* delay slot */
408 /* Maybe handle a signal */
409 5: /* get thread info from current task*/
410 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
411 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
412 andi r11, r11, _TIF_SIGPENDING;
413 beqi r11, 1f; /* No signals pending, skip signal handling */
415 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
416 addi r7, r0, 1; /* Arg 3: int in_syscall */
417 bralid r15, do_signal; /* Handle any signals */
418 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
420 /* Finally, return to user state. */
421 1: set_bip; /* Ints masked for state restore */
422 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
426 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
427 lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
430 /* Return to kernel state. */
431 2: set_bip; /* Ints masked for state restore */
435 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
438 TRAP_return: /* Make global symbol for debugging */
439 rtbd r14, 0; /* Return from trap (delay-slot branch) */
443 /* These syscalls need access to the struct pt_regs on the stack, so we
444 implement them in assembly (they're basically all wrappers anyway). */
/* sys_fork_wrapper -- marshal arguments and tail-call do_fork() for fork(2):
 * do_fork(SIGCHLD, parent_sp, regs, 0, 0, 0). The return value flows back
 * through the normal syscall return path (ret_from_trap).
 * Fix: Arg 3 line used a period ("add r8. r0, r0") instead of a comma,
 * which is invalid operand syntax and fails to assemble. */
446 C_ENTRY(sys_fork_wrapper):
447 addi r5, r0, SIGCHLD /* Arg 0: flags */
448 lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
449 addik r7, r1, PTO /* Arg 2: parent context */
450 add r8, r0, r0 /* Arg 3: (unused) */
451 add r9, r0, r0; /* Arg 4: (unused) */
452 add r10, r0, r0; /* Arg 5: (unused) */
453 brid do_fork /* Do real work (tail-call) */
456 /* This is the initial entry point for a new child thread, with an appropriate
457 stack in place that makes it look like the child is in the middle of a
458 syscall. This function is actually `returned to' from switch_thread
459 (copy_thread makes ret_from_fork the return address in each new thread's
461 C_ENTRY(ret_from_fork):
462 bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
463 add r3, r5, r0; /* switch_thread returns the prev task */
464 /* ( in the delay slot ) */
465 add r3, r0, r0; /* Child's fork call should return 0. */
466 brid ret_from_trap; /* Do normal trap return */
470 brid microblaze_vfork /* Do real work (tail-call) */
/* NOTE(review): the lines below belong to the clone and execve wrappers;
 * their C_ENTRY labels fall outside this excerpt. */
474 bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
475 lwi r6, r1, PTO + PT_R1; /* If so, use parent's stack ptr */
476 1: addik r7, r1, PTO; /* Arg 2: parent context */
477 add r8, r0, r0; /* Arg 3: (unused) */
478 add r9, r0, r0; /* Arg 4: (unused) */
479 add r10, r0, r0; /* Arg 5: (unused) */
480 brid do_fork /* Do real work (tail-call) */
484 addik r8, r1, PTO; /* add user context as 4th arg */
485 brid microblaze_execve; /* Do real work (tail-call).*/
488 C_ENTRY(sys_rt_sigreturn_wrapper):
489 swi r3, r1, PTO+PT_R3; /* save r3, r4 registers into the frame */
490 swi r4, r1, PTO+PT_R4;
491 addik r5, r1, PTO; /* add user context as 1st arg */
492 brlid r15, sys_rt_sigreturn /* Do real work */
494 lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
495 lwi r4, r1, PTO+PT_R4;
496 bri ret_from_trap /* fall through will not work here due to align */
500 * HW EXCEPTION routine start
504 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
505 /* See if already in kernel mode.*/ \
508 andi r1, r1, MSR_UMS; \
510 /* Kernel-mode state save. */ \
511 /* Reload kernel stack-ptr. */ \
512 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
514 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
516 /* PC, before IRQ/trap - this is one instruction above */ \
517 swi r17, r1, PTO+PT_PC; \
519 addi r11, r0, 1; /* Was in kernel-mode. */ \
520 swi r11, r1, PTO+PT_MODE; \
522 nop; /* Fill delay slot */ \
523 1: /* User-mode state save. */ \
524 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
526 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
527 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */\
529 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */\
531 /* PC, before IRQ/trap - this is one instruction above FIXME*/ \
532 swi r17, r1, PTO+PT_PC; \
534 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */ \
535 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
536 swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
537 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); \
540 C_ENTRY(full_exception_trap):
541 /* adjust exception address for privileged instruction
542 * to find where it is */
544 SAVE_STATE /* Save registers */
545 /* FIXME this can be store directly in PT_ESR reg.
546 * I tested it but there is a fault */
547 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
548 addik r15, r0, ret_from_exc - 8
549 addik r5, r1, PTO /* parameter struct pt_regs * regs */
552 mfs r7, rfsr; /* save FSR */
554 mts rfsr, r0; /* Clear sticky fsr */
556 addik r12, r0, full_exception
562 * Unaligned data trap.
564 * Unaligned data trap last on 4k page is handled here.
566 * Trap entered via exception, so EE bit is set, and interrupts
567 * are masked. This is nice, means we don't have to CLI before state save
569 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
571 C_ENTRY(unaligned_data_trap):
572 /* MS: I have to save r11 value and then restore it because
573 * set_bit, clear_eip, set_ee use r11 as temp register if MSR
574 * instructions are not used. We don't need to do this if MSR
575 * instructions are used and they use r0 instead of r11.
576 * I am using ENTRY_SP which should be primary used only for stack
578 swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
579 set_bip; /* equalize initial state for all possible entries */
582 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
583 SAVE_STATE /* Save registers.*/
584 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
585 addik r15, r0, ret_from_exc-8
586 mfs r3, resr /* ESR */
588 mfs r4, rear /* EAR */
590 addik r7, r1, PTO /* parameter struct pt_regs * regs */
591 addik r12, r0, _unaligned_data_exception
593 rtbd r12, 0; /* interrupts enabled */
599 * If the real exception handler (from hw_exception_handler.S) didn't find
600 * the mapping for the process, then we're thrown here to handle such situation.
602 * Trap entered via exceptions, so EE bit is set, and interrupts
603 * are masked. This is nice, means we don't have to CLI before state save
605 * Build a standard exception frame for TLB Access errors. All TLB exceptions
606 * will bail out to this point if they can't resolve the lightweight TLB fault.
608 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
609 * void do_page_fault(struct pt_regs *regs,
610 * unsigned long address,
611 * unsigned long error_code)
613 /* data and instruction trap - which one occurred is resolved in fault.c */
614 C_ENTRY(page_fault_data_trap):
615 SAVE_STATE /* Save registers.*/
616 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
617 addik r15, r0, ret_from_exc-8
618 addik r5, r1, PTO /* parameter struct pt_regs * regs */
619 mfs r6, rear /* parameter unsigned long address */
621 mfs r7, resr /* parameter unsigned long error_code */
623 addik r12, r0, do_page_fault
625 rted r12, 0; /* interrupts enabled */
628 C_ENTRY(page_fault_instr_trap):
629 SAVE_STATE /* Save registers.*/
630 /* where the trap should return need -8 to adjust for rtsd r15, 8 */
631 addik r15, r0, ret_from_exc-8
632 addik r5, r1, PTO /* parameter struct pt_regs * regs */
633 mfs r6, rear /* parameter unsigned long address */
635 ori r7, r0, 0 /* parameter unsigned long error_code */
636 addik r12, r0, do_page_fault
638 rted r12, 0; /* interrupts enabled */
641 /* Entry point used to return from an exception. */
642 C_ENTRY(ret_from_exc):
643 lwi r11, r1, PTO+PT_MODE;
644 bnei r11, 2f; /* See if returning to kernel mode, */
645 /* ... if so, skip resched &c. */
647 /* We're returning to user mode, so check for various conditions that
648 trigger rescheduling. */
649 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
650 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
651 andi r11, r11, _TIF_NEED_RESCHED;
654 /* Call the scheduler before returning from a syscall/trap. */
655 bralid r15, schedule; /* Call scheduler */
656 nop; /* delay slot */
658 /* Maybe handle a signal */
659 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
660 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
661 andi r11, r11, _TIF_SIGPENDING;
662 beqi r11, 1f; /* No signals pending, skip signal handling */
665 * Handle a signal return; Pending signals should be in r18.
667 * Not all registers are saved by the normal trap/interrupt entry
668 * points (for instance, call-saved registers (because the normal
669 * C-compiler calling sequence in the kernel makes sure they're
670 * preserved), and call-clobbered registers in the case of
671 * traps), but signal handlers may want to examine or change the
672 * complete register state. Here we save anything not saved by
673 * the normal entry sequence, so that it may be safely restored
674 * (in a possibly modified form) after do_signal returns. */
675 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
676 addi r7, r0, 0; /* Arg 3: int in_syscall */
677 bralid r15, do_signal; /* Handle any signals */
678 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
680 /* Finally, return to user state. */
681 1: set_bip; /* Ints masked for state restore */
682 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
687 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
689 lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
691 /* Return to kernel state. */
692 2: set_bip; /* Ints masked for state restore */
696 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
700 EXC_return: /* Make global symbol for debugging */
701 rtbd r14, 0; /* Return from an exception (delay-slot branch) */
705 * HW EXCEPTION routine end
709 * Hardware maskable interrupts.
711 * The stack-pointer (r1) should have already been saved to the memory
712 * location PER_CPU(ENTRY_SP).
715 /* MS: we are in physical address */
716 /* Save registers, switch to proper stack, convert SP to virtual.*/
717 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
718 /* MS: See if already in kernel mode. */
724 /* Kernel-mode state save. */
725 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
726 tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
728 /* MS: Make room on the stack -> activation record */
729 addik r1, r1, -STATE_SAVE_SIZE;
732 addi r11, r0, 1; /* MS: Was in kernel-mode. */
733 swi r11, r1, PTO + PT_MODE; /* MS: and save it */
735 nop; /* MS: Fill delay slot */
738 /* User-mode state save. */
739 /* MS: get the saved current */
740 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
742 lwi r1, r1, TS_THREAD_INFO;
743 addik r1, r1, THREAD_SIZE;
746 addik r1, r1, -STATE_SAVE_SIZE;
749 swi r0, r1, PTO + PT_MODE;
750 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
751 swi r11, r1, PTO+PT_R1;
753 lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
757 addik r11, r0, do_IRQ;
758 addik r15, r0, irq_call;
759 irq_call:rtbd r11, 0;
762 /* MS: we are in virtual mode */
764 lwi r11, r1, PTO + PT_MODE;
767 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
768 lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
769 andi r11, r11, _TIF_NEED_RESCHED;
771 bralid r15, schedule;
772 nop; /* delay slot */
774 /* Maybe handle a signal */
775 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
776 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
777 andi r11, r11, _TIF_SIGPENDING;
778 beqid r11, no_intr_resched
779 /* Handle a signal return; Pending signals should be in r18. */
780 addi r7, r0, 0; /* Arg 3: int in_syscall */
781 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
782 bralid r15, do_signal; /* Handle any signals */
783 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
785 /* Finally, return to user state. */
787 /* Disable interrupts, we are now committed to the state restore */
789 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
793 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
794 lwi r1, r1, PT_R1 - PT_SIZE;
796 /* MS: Return to kernel state. */
798 #ifdef CONFIG_PREEMPT
799 lwi r11, CURRENT_TASK, TS_THREAD_INFO;
800 /* MS: get preempt_count from thread info */
801 lwi r5, r11, TI_PREEMPT_COUNT;
804 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
805 andi r5, r5, _TIF_NEED_RESCHED;
806 beqi r5, restore /* if zero jump over */
809 /* interrupts are off that's why I am calling preempt_schedule_irq */
810 bralid r15, preempt_schedule_irq
812 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
813 lwi r5, r11, TI_FLAGS; /* get flags in thread info */
814 andi r5, r5, _TIF_NEED_RESCHED;
815 bnei r5, preempt /* if non zero jump to resched */
818 VM_OFF /* MS: turn off MMU */
821 addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
824 IRQ_return: /* MS: Make global symbol for debugging */
830 * We enter dbtrap in "BIP" (breakpoint) mode.
831 * So we exit the breakpoint mode with an 'rtbd' and proceed with the
833 * however, wait to save state first
835 C_ENTRY(_debug_exception):
836 /* BIP bit is set on entry, no interrupts can occur */
837 swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
843 /* Kernel-mode state save. */
844 lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
847 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
850 addi r11, r0, 1; /* Was in kernel-mode. */
851 swi r11, r1, PTO + PT_MODE;
853 nop; /* Fill delay slot */
854 1: /* User-mode state save. */
855 lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
857 lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
858 addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
861 addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
864 swi r0, r1, PTO+PT_MODE; /* Was in user-mode. */
865 lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
866 swi r11, r1, PTO+PT_R1; /* Store user SP. */
871 addi r5, r0, SIGTRAP /* send the trap signal */
872 add r6, r0, CURRENT_TASK; /* Arg 2: current task pointer */
873 addk r7, r0, r0 /* 3rd param zero */
874 dbtrap_call: rtbd r0, send_sig;
875 addik r15, r0, dbtrap_call;
877 set_bip; /* Ints masked for state restore*/
878 lwi r11, r1, PTO+PT_MODE;
881 /* Get current task ptr into r11 */
882 lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
883 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
884 andi r11, r11, _TIF_NEED_RESCHED;
887 /* Call the scheduler before returning from a syscall/trap. */
889 bralid r15, schedule; /* Call scheduler */
890 nop; /* delay slot */
891 /* XXX Is PT_DTRACE handling needed here? */
892 /* XXX m68knommu also checks TASK_STATE & TASK_COUNTER here. */
894 /* Maybe handle a signal */
895 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
896 lwi r11, r11, TI_FLAGS; /* get flags in thread info */
897 andi r11, r11, _TIF_SIGPENDING;
898 beqi r11, 1f; /* No signals pending, skip signal handling */
900 /* Handle a signal return; Pending signals should be in r18. */
901 /* Not all registers are saved by the normal trap/interrupt entry
902 points (for instance, call-saved registers (because the normal
903 C-compiler calling sequence in the kernel makes sure they're
904 preserved), and call-clobbered registers in the case of
905 traps), but signal handlers may want to examine or change the
906 complete register state. Here we save anything not saved by
907 the normal entry sequence, so that it may be safely restored
908 (in a possibly modified form) after do_signal returns. */
910 addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
911 addi r7, r0, 0; /* Arg 3: int in_syscall */
912 bralid r15, do_signal; /* Handle any signals */
913 add r6, r0, r0; /* Arg 2: sigset_t *oldset */
916 /* Finally, return to user state. */
918 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
923 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
926 lwi r1, r1, PT_R1 - PT_SIZE;
927 /* Restore user stack pointer. */
930 /* Return to kernel state. */
934 addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
938 DBTRAP_return: /* Make global symbol for debugging */
939 rtbd r14, 0; /* Return from debug trap (delay-slot branch) */
945 /* prepare return value */
946 addk r3, r0, CURRENT_TASK
948 /* save registers in cpu_context */
949 /* use r11 and r12, volatile registers, as temp register */
950 /* give start of cpu_context for previous process */
951 addik r11, r5, TI_CPU_CONTEXT
954 /* skip volatile registers.
955 * they are saved on stack when we jumped to _switch_to() */
956 /* dedicated registers */
963 /* save non-volatile registers */
976 /* special purpose registers */
990 /* update r31 (CURRENT_TASK): pointer to the task which will run next */
991 lwi CURRENT_TASK, r6, TI_TASK
992 /* store it to current_save too */
993 swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
995 /* get new process' cpu context and restore */
996 /* start of the next task's cpu context */
997 addik r11, r6, TI_CPU_CONTEXT
999 /* non-volatile registers */
1000 lwi r30, r11, CC_R30
1001 lwi r29, r11, CC_R29
1002 lwi r28, r11, CC_R28
1003 lwi r27, r11, CC_R27
1004 lwi r26, r11, CC_R26
1005 lwi r25, r11, CC_R25
1006 lwi r24, r11, CC_R24
1007 lwi r23, r11, CC_R23
1008 lwi r22, r11, CC_R22
1009 lwi r21, r11, CC_R21
1010 lwi r20, r11, CC_R20
1011 lwi r19, r11, CC_R19
1012 /* dedicated registers */
1013 lwi r18, r11, CC_R18
1014 lwi r17, r11, CC_R17
1015 lwi r16, r11, CC_R16
1016 lwi r15, r11, CC_R15
1017 lwi r14, r11, CC_R14
1018 lwi r13, r11, CC_R13
1019 /* skip volatile registers */
1023 /* special purpose registers */
1024 lwi r12, r11, CC_FSR
1027 lwi r12, r11, CC_MSR
1035 brai 0x70; /* Jump back to FS-boot */
1040 swi r5, r0, 0x250 + TOPHYS(r0_ram)
1043 swi r5, r0, 0x254 + TOPHYS(r0_ram)
1046 /* These are compiled and loaded into high memory, then
1047 * copied into place in mach_early_setup */
1048 .section .init.ivt, "ax"
1050 /* this is very important - here is the reset vector */
1051 /* in current MMU branch you don't care what is here - it is
1052 * used from the bootloader side - but this is correct for FS-BOOT */
1055 brai TOPHYS(_user_exception); /* syscall handler */
1056 brai TOPHYS(_interrupt); /* Interrupt handler */
1057 brai TOPHYS(_break); /* nmi trap handler */
1058 brai TOPHYS(_hw_exception_handler); /* HW exception handler */
1061 brai TOPHYS(_debug_exception); /* debug trap handler*/
1063 .section .rodata,"a"
1064 #include "syscall_table.S"
1066 syscall_table_size=(.-sys_call_table)
1073 .ascii "IRQ (PREEMPTED)\0"
1074 type_SYSCALL_PREEMPT:
1075 .ascii " SYSCALL (PREEMPTED)\0"
1078 * Trap decoding for stack unwinder
1079 * Tuples are (start addr, end addr, string)
1080 * If the return address lies in [start addr, end addr],
1081 * the unwinder displays 'string'
1085 .global microblaze_trap_handlers
1086 microblaze_trap_handlers:
1087 /* Exact matches come first */
1088 .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
1089 .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
1090 /* Fuzzy matches go here */
1091 .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
1092 .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
1094 .word 0 ; .word 0 ; .word 0