ARM: convert all "mov.* pc, reg" to "bx reg" for ARMv6+
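ARMv4T introduced "bx", and on ARMv6 and later CPUs a "bx lr" return performs
better than "mov pc, lr" and is the sequence recommended by the ARM
architecture manual. Rather than patching each return piecemeal, the series
adds a "ret" assembler macro (with per-condition variants such as "reteq")
that expands to "bx" when returning through lr on ARMv6+, and falls back to
"mov pc, reg" everywhere else. A minimal sketch of such a macro, modelled on
what this series adds to arch/arm/include/asm/assembler.h (illustrative, not
a verbatim quote):

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg		@ pre-v6: keep the classic return
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg			@ v6+: bx lr is the preferred return
	.else
	mov\c	pc, \reg		@ other registers stay as mov pc, reg
	.endif
#endif
	.endm
	.endr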
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 52a949a8077d3329fc674cbbda7645673e484371..36276cdccfbc71e9e69de18fe21a00ac25ddbd07 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -224,7 +224,7 @@ svc_preempt:
 1:     bl      preempt_schedule_irq            @ irq en/disable is done inside
        ldr     r0, [tsk, #TI_FLAGS]            @ get new tasks TI_FLAGS
        tst     r0, #_TIF_NEED_RESCHED
-       moveq   pc, r8                          @ go again
+       reteq   r8                              @ go again
        b       1b
 #endif
 
@@ -490,7 +490,7 @@ ENDPROC(__und_usr)
        .pushsection .fixup, "ax"
        .align  2
 4:     str     r4, [sp, #S_PC]                 @ retry current instruction
-       mov     pc, r9
+       ret     r9
        .popsection
        .pushsection __ex_table,"a"
        .long   1b, 4b
@@ -552,7 +552,7 @@ call_fpe:
 #endif
        tst     r0, #0x08000000                 @ only CDP/CPRT/LDC/STC have bit 27
        tstne   r0, #0x04000000                 @ bit 26 set on both ARM and Thumb-2
-       moveq   pc, lr
+       reteq   lr
        and     r8, r0, #0x00000f00             @ mask out CP number
  THUMB(        lsr     r8, r8, #8              )
        mov     r7, #1
@@ -571,33 +571,33 @@ call_fpe:
  THUMB(        add     pc, r8                  )
        nop
 
-       movw_pc lr                              @ CP#0
+       ret.w   lr                              @ CP#0
        W(b)    do_fpe                          @ CP#1 (FPE)
        W(b)    do_fpe                          @ CP#2 (FPE)
-       movw_pc lr                              @ CP#3
+       ret.w   lr                              @ CP#3
 #ifdef CONFIG_CRUNCH
        b       crunch_task_enable              @ CP#4 (MaverickCrunch)
        b       crunch_task_enable              @ CP#5 (MaverickCrunch)
        b       crunch_task_enable              @ CP#6 (MaverickCrunch)
 #else
-       movw_pc lr                              @ CP#4
-       movw_pc lr                              @ CP#5
-       movw_pc lr                              @ CP#6
+       ret.w   lr                              @ CP#4
+       ret.w   lr                              @ CP#5
+       ret.w   lr                              @ CP#6
 #endif
-       movw_pc lr                              @ CP#7
-       movw_pc lr                              @ CP#8
-       movw_pc lr                              @ CP#9
+       ret.w   lr                              @ CP#7
+       ret.w   lr                              @ CP#8
+       ret.w   lr                              @ CP#9
 #ifdef CONFIG_VFP
        W(b)    do_vfp                          @ CP#10 (VFP)
        W(b)    do_vfp                          @ CP#11 (VFP)
 #else
-       movw_pc lr                              @ CP#10 (VFP)
-       movw_pc lr                              @ CP#11 (VFP)
+       ret.w   lr                              @ CP#10 (VFP)
+       ret.w   lr                              @ CP#11 (VFP)
 #endif
-       movw_pc lr                              @ CP#12
-       movw_pc lr                              @ CP#13
-       movw_pc lr                              @ CP#14 (Debug)
-       movw_pc lr                              @ CP#15 (Control)
+       ret.w   lr                              @ CP#12
+       ret.w   lr                              @ CP#13
+       ret.w   lr                              @ CP#14 (Debug)
+       ret.w   lr                              @ CP#15 (Control)
 
 #ifdef NEED_CPU_ARCHITECTURE
        .align  2
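The call_fpe dispatch above jumps into this table by adding the CP number to
pc, so every entry must occupy exactly one word: the W(b) branches are forced
wide for that reason, and the old movw_pc macro padded its Thumb-2 expansion
with a nop. Its replacement has to do the same, hence the ret.w spelling. A
sketch of that variant, again illustrative rather than a verbatim quote:

	.macro	ret.w, reg
	ret	\reg			@ expands per the ret macro above
#ifdef CONFIG_THUMB2_KERNEL
	nop				@ pad the 16-bit bx lr to a full word
#endif
	.endm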
@@ -649,7 +649,7 @@ ENTRY(fp_enter)
        .popsection
 
 ENTRY(no_fp)
-       mov     pc, lr
+       ret     lr
 ENDPROC(no_fp)
 
 __und_usr_fault_32:
@@ -745,7 +745,7 @@ ENDPROC(__switch_to)
 #ifdef CONFIG_ARM_THUMB
        bx      \reg
 #else
-       mov     pc, \reg
+       ret     \reg
 #endif
        .endm
 
@@ -837,7 +837,7 @@ kuser_cmpxchg64_fixup:
 #if __LINUX_ARM_ARCH__ < 6
        bcc     kuser_cmpxchg32_fixup
 #endif
-       mov     pc, lr
+       ret     lr
        .previous
 
 #else
@@ -905,7 +905,7 @@ kuser_cmpxchg32_fixup:
        subs    r8, r4, r7
        rsbcss  r8, r8, #(2b - 1b)
        strcs   r7, [sp, #S_PC]
-       mov     pc, lr
+       ret     lr
        .previous
 
 #else
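One design note, as the original commit message explains: the conversion
deliberately leaves "movs pc, lr" exception returns and the kprobes code
untouched. And since the macro only emits "bx" for lr, a non-lr site such as
the "reteq r8" in svc_preempt would, under the sketch above, still assemble
to "moveq pc, r8" for now, while documenting the return and leaving room to
deploy "bx" for other registers depending on CPU selection later.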