/*
 * arch/arm/mach-ep93xx/crunch-bits.S
 * Cirrus MaverickCrunch context switching and handling
 *
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *
 * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
 * Copyright (c) 2003-2004, MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <mach/ep93xx-regs.h>

/*
 * We can't use hex constants here due to a bug in gas.
 */
#define CRUNCH_MVDX0            0
#define CRUNCH_MVDX1            8
#define CRUNCH_MVDX2            16
#define CRUNCH_MVDX3            24
#define CRUNCH_MVDX4            32
#define CRUNCH_MVDX5            40
#define CRUNCH_MVDX6            48
#define CRUNCH_MVDX7            56
#define CRUNCH_MVDX8            64
#define CRUNCH_MVDX9            72
#define CRUNCH_MVDX10           80
#define CRUNCH_MVDX11           88
#define CRUNCH_MVDX12           96
#define CRUNCH_MVDX13           104
#define CRUNCH_MVDX14           112
#define CRUNCH_MVDX15           120
#define CRUNCH_MVAX0L           128
#define CRUNCH_MVAX0M           132
#define CRUNCH_MVAX0H           136
#define CRUNCH_MVAX1L           140
#define CRUNCH_MVAX1M           144
#define CRUNCH_MVAX1H           148
#define CRUNCH_MVAX2L           152
#define CRUNCH_MVAX2M           156
#define CRUNCH_MVAX2H           160
#define CRUNCH_MVAX3L           164
#define CRUNCH_MVAX3M           168
#define CRUNCH_MVAX3H           172
#define CRUNCH_DSPSC            176

#define CRUNCH_SIZE             184
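
/*
 * Byte layout of the per-thread crunch save area: sixteen 64-bit MVDX
 * registers at offsets 0-120 (8 bytes each), the four 72-bit MVAX
 * accumulators stored as three 32-bit words each (low/middle/high) at
 * offsets 128-172, and the DSPSC status word saved as a doubleword at
 * 176, for a total of 184 bytes (CRUNCH_SIZE).
 */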

        .text

/*
 * Lazy switching of crunch coprocessor context
 *
 * r10 = struct thread_info pointer
 * r9  = ret_from_exception
 * lr  = undefined instr exit
 *
 * called from prefetch exception handler with interrupts enabled
 */
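/*
 * Crunch access is left disabled when a new task is switched in; the
 * first crunch instruction that task executes then faults and lands
 * here.  We enable access, save the previous owner's register contents
 * (if any), load this task's, and wind the saved pc back by one
 * instruction so the faulting crunch instruction is retried on return.
 */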
ENTRY(crunch_task_enable)
        inc_preempt_count r10, r3

        ldr     r8, =(EP93XX_APB_VIRT_BASE + 0x00130000)        @ syscon addr
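        @ offset 0x80 is the syscon device config register (bit 23 gates
        @ CPU access to crunch); it is protected by the software lock at
        @ offset 0xc0, which must be written with 0xaa before each update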

        ldr     r1, [r8, #0x80]
        tst     r1, #0x00800000                 @ access to crunch enabled?
        bne     2f                              @ if so no business here
        mov     r3, #0xaa                       @ unlock syscon swlock
        str     r3, [r8, #0xc0]
        orr     r1, r1, #0x00800000             @ enable access to crunch
        str     r1, [r8, #0x80]

        ldr     r3, =crunch_owner
        add     r0, r10, #TI_CRUNCH_STATE       @ get task crunch save area
        ldr     r2, [sp, #60]                   @ current task pc value
        ldr     r1, [r3]                        @ get current crunch owner
        str     r0, [r3]                        @ this task now owns crunch
        sub     r2, r2, #4                      @ adjust pc back
        str     r2, [sp, #60]

        ldr     r2, [r8, #0x80]
        mov     r2, r2                          @ flush out enable (@@@)
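        @ reading the register back (plus the dependent mov) is meant to
        @ make sure the enable write has taken effect before the crunch
        @ instructions below touch the coprocessor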

        teq     r1, #0                          @ test for last ownership
        mov     lr, r9                          @ normal exit from exception
        beq     crunch_load                     @ no owner, skip save

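/*
 * crunch_save dumps the current coprocessor contents to [r1] and, unless
 * r0 is zero, falls through into crunch_load, which reloads them from
 * [r0].  Both are also entered with a bl from the routines further down.
 */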
crunch_save:
        cfstr64         mvdx0, [r1, #CRUNCH_MVDX0]      @ save 64b registers
        cfstr64         mvdx1, [r1, #CRUNCH_MVDX1]
        cfstr64         mvdx2, [r1, #CRUNCH_MVDX2]
        cfstr64         mvdx3, [r1, #CRUNCH_MVDX3]
        cfstr64         mvdx4, [r1, #CRUNCH_MVDX4]
        cfstr64         mvdx5, [r1, #CRUNCH_MVDX5]
        cfstr64         mvdx6, [r1, #CRUNCH_MVDX6]
        cfstr64         mvdx7, [r1, #CRUNCH_MVDX7]
        cfstr64         mvdx8, [r1, #CRUNCH_MVDX8]
        cfstr64         mvdx9, [r1, #CRUNCH_MVDX9]
        cfstr64         mvdx10, [r1, #CRUNCH_MVDX10]
        cfstr64         mvdx11, [r1, #CRUNCH_MVDX11]
        cfstr64         mvdx12, [r1, #CRUNCH_MVDX12]
        cfstr64         mvdx13, [r1, #CRUNCH_MVDX13]
        cfstr64         mvdx14, [r1, #CRUNCH_MVDX14]
        cfstr64         mvdx15, [r1, #CRUNCH_MVDX15]

#ifdef __ARMEB__
#error fix me for ARMEB
#endif

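        @ each 72-bit accumulator is staged through mvfx0 and stored as
        @ three 32-bit words (low, middle, high)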
        cfmv32al        mvfx0, mvax0                    @ save 72b accumulators
        cfstr32         mvfx0, [r1, #CRUNCH_MVAX0L]
        cfmv32am        mvfx0, mvax0
        cfstr32         mvfx0, [r1, #CRUNCH_MVAX0M]
        cfmv32ah        mvfx0, mvax0
        cfstr32         mvfx0, [r1, #CRUNCH_MVAX0H]
        cfmv32al        mvfx0, mvax1
        cfstr32         mvfx0, [r1, #CRUNCH_MVAX1L]
        cfmv32am        mvfx0, mvax1
        cfstr32         mvfx0, [r1, #CRUNCH_MVAX1M]
        cfmv32ah        mvfx0, mvax1
        cfstr32         mvfx0, [r1, #CRUNCH_MVAX1H]
        cfmv32al        mvfx0, mvax2
        cfstr32         mvfx0, [r1, #CRUNCH_MVAX2L]
        cfmv32am        mvfx0, mvax2
        cfstr32         mvfx0, [r1, #CRUNCH_MVAX2M]
        cfmv32ah        mvfx0, mvax2
        cfstr32         mvfx0, [r1, #CRUNCH_MVAX2H]
        cfmv32al        mvfx0, mvax3
        cfstr32         mvfx0, [r1, #CRUNCH_MVAX3L]
        cfmv32am        mvfx0, mvax3
        cfstr32         mvfx0, [r1, #CRUNCH_MVAX3M]
        cfmv32ah        mvfx0, mvax3
        cfstr32         mvfx0, [r1, #CRUNCH_MVAX3H]

        cfmv32sc        mvdx0, dspsc                    @ save status word
        cfstr64         mvdx0, [r1, #CRUNCH_DSPSC]

        teq             r0, #0                          @ anything to load?
        cfldr64eq       mvdx0, [r1, #CRUNCH_MVDX0]      @ mvdx0 was clobbered
        beq             1f

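/*
 * crunch_load restores DSPSC and the accumulators before the general
 * registers: mvdx0/mvfx0 serve as staging registers here and only get
 * their final values from the 64-bit loads at the end.
 */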
crunch_load:
        cfldr64         mvdx0, [r0, #CRUNCH_DSPSC]      @ load status word
        cfmvsc32        dspsc, mvdx0

        cfldr32         mvfx0, [r0, #CRUNCH_MVAX0L]     @ load 72b accumulators
        cfmval32        mvax0, mvfx0
        cfldr32         mvfx0, [r0, #CRUNCH_MVAX0M]
        cfmvam32        mvax0, mvfx0
        cfldr32         mvfx0, [r0, #CRUNCH_MVAX0H]
        cfmvah32        mvax0, mvfx0
        cfldr32         mvfx0, [r0, #CRUNCH_MVAX1L]
        cfmval32        mvax1, mvfx0
        cfldr32         mvfx0, [r0, #CRUNCH_MVAX1M]
        cfmvam32        mvax1, mvfx0
        cfldr32         mvfx0, [r0, #CRUNCH_MVAX1H]
        cfmvah32        mvax1, mvfx0
        cfldr32         mvfx0, [r0, #CRUNCH_MVAX2L]
        cfmval32        mvax2, mvfx0
        cfldr32         mvfx0, [r0, #CRUNCH_MVAX2M]
        cfmvam32        mvax2, mvfx0
        cfldr32         mvfx0, [r0, #CRUNCH_MVAX2H]
        cfmvah32        mvax2, mvfx0
        cfldr32         mvfx0, [r0, #CRUNCH_MVAX3L]
        cfmval32        mvax3, mvfx0
        cfldr32         mvfx0, [r0, #CRUNCH_MVAX3M]
        cfmvam32        mvax3, mvfx0
        cfldr32         mvfx0, [r0, #CRUNCH_MVAX3H]
        cfmvah32        mvax3, mvfx0

        cfldr64         mvdx0, [r0, #CRUNCH_MVDX0]      @ load 64b registers
        cfldr64         mvdx1, [r0, #CRUNCH_MVDX1]
        cfldr64         mvdx2, [r0, #CRUNCH_MVDX2]
        cfldr64         mvdx3, [r0, #CRUNCH_MVDX3]
        cfldr64         mvdx4, [r0, #CRUNCH_MVDX4]
        cfldr64         mvdx5, [r0, #CRUNCH_MVDX5]
        cfldr64         mvdx6, [r0, #CRUNCH_MVDX6]
        cfldr64         mvdx7, [r0, #CRUNCH_MVDX7]
        cfldr64         mvdx8, [r0, #CRUNCH_MVDX8]
        cfldr64         mvdx9, [r0, #CRUNCH_MVDX9]
        cfldr64         mvdx10, [r0, #CRUNCH_MVDX10]
        cfldr64         mvdx11, [r0, #CRUNCH_MVDX11]
        cfldr64         mvdx12, [r0, #CRUNCH_MVDX12]
        cfldr64         mvdx13, [r0, #CRUNCH_MVDX13]
        cfldr64         mvdx14, [r0, #CRUNCH_MVDX14]
        cfldr64         mvdx15, [r0, #CRUNCH_MVDX15]

1:
#ifdef CONFIG_PREEMPT_COUNT
        get_thread_info r10
#endif
2:      dec_preempt_count r10, r3
        ret     lr

/*
 * Back up crunch regs to save area and disable access to them
 * (mainly for gdb or sleep mode usage)
 *
 * r0 = struct thread_info pointer of target task or NULL for any
 */
ENTRY(crunch_task_disable)
        stmfd   sp!, {r4, r5, lr}

        mrs     ip, cpsr
        orr     r2, ip, #PSR_I_BIT              @ disable interrupts
        msr     cpsr_c, r2

        ldr     r4, =(EP93XX_APB_VIRT_BASE + 0x00130000)        @ syscon addr

        ldr     r3, =crunch_owner
        add     r2, r0, #TI_CRUNCH_STATE        @ get task crunch save area
        ldr     r1, [r3]                        @ get current crunch owner
        teq     r1, #0                          @ any current owner?
        beq     1f                              @ no: quit
        teq     r0, #0                          @ any owner?
        teqne   r1, r2                          @ or specified one?
        bne     1f                              @ no: quit

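        @ crunch_save needs the coprocessor accessible, so turn access on
        @ just long enough to dump the registers; it is disabled again below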
        ldr     r5, [r4, #0x80]                 @ enable access to crunch
        mov     r2, #0xaa
        str     r2, [r4, #0xc0]
        orr     r5, r5, #0x00800000
        str     r5, [r4, #0x80]

        mov     r0, #0                          @ nothing to load
        str     r0, [r3]                        @ no more current owner
        ldr     r2, [r4, #0x80]                 @ flush out enable (@@@)
        mov     r2, r2
        bl      crunch_save

        mov     r2, #0xaa                       @ disable access to crunch
        str     r2, [r4, #0xc0]
        bic     r5, r5, #0x00800000
        str     r5, [r4, #0x80]
        ldr     r5, [r4, #0x80]                 @ flush out enable (@@@)
        mov     r5, r5

1:      msr     cpsr_c, ip                      @ restore interrupt mode
        ldmfd   sp!, {r4, r5, pc}

/*
 * Copy crunch state to given memory address
 *
 * r0 = struct thread_info pointer of target task
 * r1 = memory address where to store crunch state
 *
 * this is called mainly in the creation of signal stack frames
 */
ENTRY(crunch_task_copy)
        mrs     ip, cpsr
        orr     r2, ip, #PSR_I_BIT              @ disable interrupts
        msr     cpsr_c, r2

        ldr     r3, =crunch_owner
        add     r2, r0, #TI_CRUNCH_STATE        @ get task crunch save area
        ldr     r3, [r3]                        @ get current crunch owner
        teq     r2, r3                          @ does this task own it...
        beq     1f

        @ current crunch values are in the task save area
        msr     cpsr_c, ip                      @ restore interrupt mode
        mov     r0, r1
        mov     r1, r2
        mov     r2, #CRUNCH_SIZE
        b       memcpy

1:      @ this task owns crunch regs -- grab a copy from there
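        @ r1 still holds the caller's destination address, so crunch_save
        @ writes the live register contents straight into it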
        mov     r0, #0                          @ nothing to load
        mov     r3, lr                          @ preserve return address
        bl      crunch_save
        msr     cpsr_c, ip                      @ restore interrupt mode
        ret     r3

/*
 * Restore crunch state from given memory address
 *
 * r0 = struct thread_info pointer of target task
 * r1 = memory address where to get crunch state from
 *
 * this is used to restore crunch state when unwinding a signal stack frame
 */
ENTRY(crunch_task_restore)
        mrs     ip, cpsr
        orr     r2, ip, #PSR_I_BIT              @ disable interrupts
        msr     cpsr_c, r2

        ldr     r3, =crunch_owner
        add     r2, r0, #TI_CRUNCH_STATE        @ get task crunch save area
        ldr     r3, [r3]                        @ get current crunch owner
        teq     r2, r3                          @ does this task own it...
        beq     1f

        @ this task doesn't own crunch regs -- use its save area
        msr     cpsr_c, ip                      @ restore interrupt mode
        mov     r0, r2
        mov     r2, #CRUNCH_SIZE
        b       memcpy

1:      @ this task owns crunch regs -- load them directly
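        @ crunch_load reads the new contents from [r0], so point it at the
        @ caller's buffer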
        mov     r0, r1
        mov     r1, #0                          @ nothing to save
        mov     r3, lr                          @ preserve return address
        bl      crunch_load
        msr     cpsr_c, ip                      @ restore interrupt mode
        ret     r3