arch/metag/lib/usercopy.c
1 /*
2  * User address space access functions.
3  * The non-inlined parts of asm-metag/uaccess.h are here.
4  *
5  * Copyright (C) 2006, Imagination Technologies.
6  * Copyright (C) 2000, Axis Communications AB.
7  *
8  * Written by Hans-Peter Nilsson.
9  * Pieces used from memcpy, originally by Kenny Ranerup long time ago.
10  * Modified for Meta by Will Newton.
11  */
12
13 #include <linux/export.h>
14 #include <linux/uaccess.h>
15 #include <asm/cache.h>                  /* def of L1_CACHE_BYTES */
16
17 #define USE_RAPF
18 #define RAPF_MIN_BUF_SIZE       (3*L1_CACHE_BYTES)
19
20
21 /* The "double write" in this code is because the Meta will not fault
22  * immediately unless the memory pipe is forced to by e.g. a data stall or
23  * another memory op. The second write should be discarded by the write
24  * combiner so should have virtually no cost.
25  */
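/*
 * Editorial illustration (not part of the original source): the byte-copy
 * macro below shows the pattern concretely.  The first SETB may complete
 * without its fault being taken yet; the second, post-incrementing SETB
 * forces the memory pipe, so it is the one covered by the __ex_table entry,
 * and the write combiner is expected to squash the duplicate store:
 *
 *	   SETB [%0],D1Ar1	first write, fault may be deferred
 *	2: SETB [%0++],D1Ar1	second write forces the fault; 2b -> fixup
 */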
26
27 #define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
28         asm volatile (                                           \
29                 COPY                                             \
30                 "1:\n"                                           \
31                 "       .section .fixup,\"ax\"\n"                \
32                 FIXUP                                            \
33                 "       MOVT    D1Ar1,#HI(1b)\n"                 \
34                 "       JUMP    D1Ar1,#LO(1b)\n"                 \
35                 "       .previous\n"                             \
36                 "       .section __ex_table,\"a\"\n"             \
37                 TENTRY                                           \
38                 "       .previous\n"                             \
39                 : "=r" (to), "=r" (from), "=r" (ret)             \
40                 : "0" (to), "1" (from), "2" (ret)                \
41                 : "D1Ar1", "memory")
42
43
44 #define __asm_copy_to_user_1(to, from, ret)     \
45         __asm_copy_user_cont(to, from, ret,     \
46                 "       GETB D1Ar1,[%1++]\n"    \
47                 "       SETB [%0],D1Ar1\n"      \
48                 "2:     SETB [%0++],D1Ar1\n",   \
49                 "3:     ADD  %2,%2,#1\n",       \
50                 "       .long 2b,3b\n")
51
52 #define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
53         __asm_copy_user_cont(to, from, ret,             \
54                 "       GETW D1Ar1,[%1++]\n"            \
55                 "       SETW [%0],D1Ar1\n"              \
56                 "2:     SETW [%0++],D1Ar1\n" COPY,      \
57                 "3:     ADD  %2,%2,#2\n" FIXUP,         \
58                 "       .long 2b,3b\n" TENTRY)
59
60 #define __asm_copy_to_user_2(to, from, ret) \
61         __asm_copy_to_user_2x_cont(to, from, ret, "", "", "")
62
63 #define __asm_copy_to_user_3(to, from, ret) \
64         __asm_copy_to_user_2x_cont(to, from, ret,       \
65                 "       GETB D1Ar1,[%1++]\n"            \
66                 "       SETB [%0],D1Ar1\n"              \
67                 "4:     SETB [%0++],D1Ar1\n",           \
68                 "5:     ADD  %2,%2,#1\n",               \
69                 "       .long 4b,5b\n")
70
71 #define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
72         __asm_copy_user_cont(to, from, ret,             \
73                 "       GETD D1Ar1,[%1++]\n"            \
74                 "       SETD [%0],D1Ar1\n"              \
75                 "2:     SETD [%0++],D1Ar1\n" COPY,      \
76                 "3:     ADD  %2,%2,#4\n" FIXUP,         \
77                 "       .long 2b,3b\n" TENTRY)
78
79 #define __asm_copy_to_user_4(to, from, ret) \
80         __asm_copy_to_user_4x_cont(to, from, ret, "", "", "")
81
82 #define __asm_copy_to_user_5(to, from, ret) \
83         __asm_copy_to_user_4x_cont(to, from, ret,       \
84                 "       GETB D1Ar1,[%1++]\n"            \
85                 "       SETB [%0],D1Ar1\n"              \
86                 "4:     SETB [%0++],D1Ar1\n",           \
87                 "5:     ADD  %2,%2,#1\n",               \
88                 "       .long 4b,5b\n")
89
90 #define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
91         __asm_copy_to_user_4x_cont(to, from, ret,       \
92                 "       GETW D1Ar1,[%1++]\n"            \
93                 "       SETW [%0],D1Ar1\n"              \
94                 "4:     SETW [%0++],D1Ar1\n" COPY,      \
95                 "5:     ADD  %2,%2,#2\n" FIXUP,         \
96                 "       .long 4b,5b\n" TENTRY)
97
98 #define __asm_copy_to_user_6(to, from, ret) \
99         __asm_copy_to_user_6x_cont(to, from, ret, "", "", "")
100
101 #define __asm_copy_to_user_7(to, from, ret) \
102         __asm_copy_to_user_6x_cont(to, from, ret,       \
103                 "       GETB D1Ar1,[%1++]\n"            \
104                 "       SETB [%0],D1Ar1\n"              \
105                 "6:     SETB [%0++],D1Ar1\n",           \
106                 "7:     ADD  %2,%2,#1\n",               \
107                 "       .long 6b,7b\n")
108
109 #define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
110         __asm_copy_to_user_4x_cont(to, from, ret,       \
111                 "       GETD D1Ar1,[%1++]\n"            \
112                 "       SETD [%0],D1Ar1\n"              \
113                 "4:     SETD [%0++],D1Ar1\n" COPY,      \
114                 "5:     ADD  %2,%2,#4\n"  FIXUP,        \
115                 "       .long 4b,5b\n" TENTRY)
116
117 #define __asm_copy_to_user_8(to, from, ret) \
118         __asm_copy_to_user_8x_cont(to, from, ret, "", "", "")
119
120 #define __asm_copy_to_user_9(to, from, ret) \
121         __asm_copy_to_user_8x_cont(to, from, ret,       \
122                 "       GETB D1Ar1,[%1++]\n"            \
123                 "       SETB [%0],D1Ar1\n"              \
124                 "6:     SETB [%0++],D1Ar1\n",           \
125                 "7:     ADD  %2,%2,#1\n",               \
126                 "       .long 6b,7b\n")
127
128 #define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
129         __asm_copy_to_user_8x_cont(to, from, ret,       \
130                 "       GETW D1Ar1,[%1++]\n"            \
131                 "       SETW [%0],D1Ar1\n"              \
132                 "6:     SETW [%0++],D1Ar1\n" COPY,      \
133                 "7:     ADD  %2,%2,#2\n" FIXUP,         \
134                 "       .long 6b,7b\n" TENTRY)
135
136 #define __asm_copy_to_user_10(to, from, ret) \
137         __asm_copy_to_user_10x_cont(to, from, ret, "", "", "")
138
139 #define __asm_copy_to_user_11(to, from, ret) \
140         __asm_copy_to_user_10x_cont(to, from, ret,      \
141                 "       GETB D1Ar1,[%1++]\n"            \
142                 "       SETB [%0],D1Ar1\n"              \
143                 "8:     SETB [%0++],D1Ar1\n",           \
144                 "9:     ADD  %2,%2,#1\n",               \
145                 "       .long 8b,9b\n")
146
147 #define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
148         __asm_copy_to_user_8x_cont(to, from, ret,       \
149                 "       GETD D1Ar1,[%1++]\n"            \
150                 "       SETD [%0],D1Ar1\n"              \
151                 "6:     SETD [%0++],D1Ar1\n" COPY,      \
152                 "7:     ADD  %2,%2,#4\n" FIXUP,         \
153                 "       .long 6b,7b\n" TENTRY)
154 #define __asm_copy_to_user_12(to, from, ret) \
155         __asm_copy_to_user_12x_cont(to, from, ret, "", "", "")
156
157 #define __asm_copy_to_user_13(to, from, ret) \
158         __asm_copy_to_user_12x_cont(to, from, ret,      \
159                 "       GETB D1Ar1,[%1++]\n"            \
160                 "       SETB [%0],D1Ar1\n"              \
161                 "8:     SETB [%0++],D1Ar1\n",           \
162                 "9:     ADD  %2,%2,#1\n",               \
163                 "       .long 8b,9b\n")
164
165 #define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
166         __asm_copy_to_user_12x_cont(to, from, ret,      \
167                 "       GETW D1Ar1,[%1++]\n"            \
168                 "       SETW [%0],D1Ar1\n"              \
169                 "8:     SETW [%0++],D1Ar1\n" COPY,      \
170                 "9:     ADD  %2,%2,#2\n" FIXUP,         \
171                 "       .long 8b,9b\n" TENTRY)
172
173 #define __asm_copy_to_user_14(to, from, ret) \
174         __asm_copy_to_user_14x_cont(to, from, ret, "", "", "")
175
176 #define __asm_copy_to_user_15(to, from, ret) \
177         __asm_copy_to_user_14x_cont(to, from, ret,      \
178                 "       GETB D1Ar1,[%1++]\n"            \
179                 "       SETB [%0],D1Ar1\n"              \
180                 "10:    SETB [%0++],D1Ar1\n",           \
181                 "11:    ADD  %2,%2,#1\n",               \
182                 "       .long 10b,11b\n")
183
184 #define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
185         __asm_copy_to_user_12x_cont(to, from, ret,      \
186                 "       GETD D1Ar1,[%1++]\n"            \
187                 "       SETD [%0],D1Ar1\n"              \
188                 "8:     SETD [%0++],D1Ar1\n" COPY,      \
189                 "9:     ADD  %2,%2,#4\n" FIXUP,         \
190                 "       .long 8b,9b\n" TENTRY)
191
192 #define __asm_copy_to_user_16(to, from, ret) \
193                 __asm_copy_to_user_16x_cont(to, from, ret, "", "", "")
194
195 #define __asm_copy_to_user_8x64(to, from, ret) \
196         asm volatile (                                  \
197                 "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
198                 "       SETL [%0],D0Ar2,D1Ar1\n"        \
199                 "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
200                 "1:\n"                                  \
201                 "       .section .fixup,\"ax\"\n"       \
202                 "3:     ADD  %2,%2,#8\n"                \
203                 "       MOVT    D0Ar2,#HI(1b)\n"        \
204                 "       JUMP    D0Ar2,#LO(1b)\n"        \
205                 "       .previous\n"                    \
206                 "       .section __ex_table,\"a\"\n"    \
207                 "       .long 2b,3b\n"                  \
208                 "       .previous\n"                    \
209                 : "=r" (to), "=r" (from), "=r" (ret)    \
210                 : "0" (to), "1" (from), "2" (ret)       \
211                 : "D1Ar1", "D0Ar2", "memory")
212
213 /*
214  *      optimized copying loop using RAPF when 64 bit aligned
215  *
216  *      n               will be automatically decremented inside the loop
217  *      ret             will be left intact. If an error occurs, we rewind
218  *                      so that the original non-optimized code fills in
219  *                      this value correctly.
220  *
221  *      on fault:
222  *              >       n will hold total number of uncopied bytes
223  *
224  *              >       {'to','from'} will be rewound so that
225  *                      the non-optimized code can do the proper fixup
226  *
227  *      DCACHE drops the cacheline which helps in reducing cache
228  *      pollution.
229  *
230  *      We introduce an extra SETL at the end of the loop to
231  *      ensure we don't fall off the loop before we catch all
232  *      errors.
233  *
234  *      NOTICE:
235  *              LSM_STEP in TXSTATUS must be cleared in the fixup code.
236  *              Since we're using M{S,G}ETL, a fault might happen at
237  *              any address in the middle of M{S,G}ETL, leaving
238  *              LSM_STEP with an incorrect value, which can make
239  *              subsequent uses of M{S,G}ET{L,D} go wrong.
240  *              i.e. if LSM_STEP was 1 when a fault occurred, the
241  *              next call to M{S,G}ET{L,D} would skip the first
242  *              copy/get as it thinks that the first one has already
243  *              been done.
244  *
245  */
246 #define __asm_copy_user_64bit_rapf_loop(                                \
247                 to, from, ret, n, id, FIXUP)                            \
248         asm volatile (                                                  \
249                         ".balign 8\n"                                   \
250                 "       MOV     RAPF, %1\n"                             \
251                 "       MSETL   [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
252                 "       MOV     D0Ar6, #0\n"                            \
253                 "       LSR     D1Ar5, %3, #6\n"                        \
254                 "       SUB     TXRPT, D1Ar5, #2\n"                     \
255                 "       MOV     RAPF, %1\n"                             \
256                 "$Lloop"id":\n"                                         \
257                 "       ADD     RAPF, %1, #64\n"                        \
258                 "21:    MGETL   D0FrT, D0.5, D0.6, D0.7, [%1++]\n"      \
259                 "22:    MSETL   [%0++], D0FrT, D0.5, D0.6, D0.7\n"      \
260                 "23:    SUB     %3, %3, #32\n"                          \
261                 "24:    MGETL   D0FrT, D0.5, D0.6, D0.7, [%1++]\n"      \
262                 "25:    MSETL   [%0++], D0FrT, D0.5, D0.6, D0.7\n"      \
263                 "26:    SUB     %3, %3, #32\n"                          \
264                 "       DCACHE  [%1+#-64], D0Ar6\n"                     \
265                 "       BR      $Lloop"id"\n"                           \
266                                                                         \
267                 "       MOV     RAPF, %1\n"                             \
268                 "27:    MGETL   D0FrT, D0.5, D0.6, D0.7, [%1++]\n"      \
269                 "28:    MSETL   [%0++], D0FrT, D0.5, D0.6, D0.7\n"      \
270                 "29:    SUB     %3, %3, #32\n"                          \
271                 "30:    MGETL   D0FrT, D0.5, D0.6, D0.7, [%1++]\n"      \
272                 "31:    MSETL   [%0++], D0FrT, D0.5, D0.6, D0.7\n"      \
273                 "32:    SETL    [%0+#-8], D0.7, D1.7\n"                 \
274                 "       SUB     %3, %3, #32\n"                          \
275                 "1:     DCACHE  [%1+#-64], D0Ar6\n"                     \
276                 "       GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"           \
277                 "       GETL    D0FrT, D1RtP, [A0StP+#-32]\n"           \
278                 "       GETL    D0.5, D1.5, [A0StP+#-24]\n"             \
279                 "       GETL    D0.6, D1.6, [A0StP+#-16]\n"             \
280                 "       GETL    D0.7, D1.7, [A0StP+#-8]\n"              \
281                 "       SUB     A0StP, A0StP, #40\n"                    \
282                 "       .section .fixup,\"ax\"\n"                       \
283                 "3:     MOV     D0Ar2, TXSTATUS\n"                      \
284                 "       MOV     D1Ar1, TXSTATUS\n"                      \
285                 "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            \
286                 "       MOV     TXSTATUS, D1Ar1\n"                      \
287                         FIXUP                                           \
288                 "       MOVT    D0Ar2, #HI(1b)\n"                       \
289                 "       JUMP    D0Ar2, #LO(1b)\n"                       \
290                 "       .previous\n"                                    \
291                 "       .section __ex_table,\"a\"\n"                    \
292                 "       .long 21b,3b\n"                                 \
293                 "       .long 22b,3b\n"                                 \
294                 "       .long 23b,3b\n"                                 \
295                 "       .long 24b,3b\n"                                 \
296                 "       .long 25b,3b\n"                                 \
297                 "       .long 26b,3b\n"                                 \
298                 "       .long 27b,3b\n"                                 \
299                 "       .long 28b,3b\n"                                 \
300                 "       .long 29b,3b\n"                                 \
301                 "       .long 30b,3b\n"                                 \
302                 "       .long 31b,3b\n"                                 \
303                 "       .long 32b,3b\n"                                 \
304                 "       .previous\n"                                    \
305                 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
306                 : "0" (to), "1" (from), "2" (ret), "3" (n)              \
307                 : "D1Ar1", "D0Ar2", "cc", "memory")
308
309 /*      rewind 'to' and 'from'  pointers when a fault occurs
310  *
311  *      Rationale:
312  *              A fault always occurs on writing to the user buffer. A fault
313  *              is at a single address, so we need to rewind by only 8
314  *              bytes.
315  *              Since we do a complete read from the kernel buffer before
316  *              writing, we need to rewind it as well. The amount to be
317  *              rewound equals the number of faulty writes in MSETL,
318  *              which is: [4 - (LSM_STEP-1)]*8
319  *              LSM_STEP is bits 10:8 in TXSTATUS which is already read
320  *              and stored in D0Ar2
321  *
322  *              NOTE: If a fault occurs at the last operation in M{G,S}ETL
323  *                      LSM_STEP will be 0, i.e. we do 4 writes in our case; if
324  *                      a fault happens at the 4th write, LSM_STEP will be 0
325  *                      instead of 4. The code copes with that.
326  *
327  *              n is updated by the number of successful writes, which is:
328  *              n = n - (LSM_STEP-1)*8
329  */
330 #define __asm_copy_to_user_64bit_rapf_loop(to,  from, ret, n, id)\
331         __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
332                 "LSR    D0Ar2, D0Ar2, #8\n"                             \
333                 "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
334                 "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
335                 "SUB    D0Ar2, D0Ar2, #1\n"                             \
336                 "MOV    D1Ar1, #4\n"                                    \
337                 "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
338                 "LSL    D0Ar2, D0Ar2, #3\n"                             \
339                 "LSL    D1Ar1, D1Ar1, #3\n"                             \
340                 "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
341                 "SUB    %0, %0, #8\n"                                   \
342                 "SUB    %1,     %1,D0Ar2\n"                             \
343                 "SUB    %3, %3, D1Ar1\n")
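/*
 * Editorial worked example (derived from the fixup above, not original text):
 * suppose the MSETL faults with LSM_STEP == 2, i.e. only the first of the
 * four 8-byte writes completed.  The fixup then computes:
 *
 *	faulty writes = 4 - (LSM_STEP - 1) = 3   ->  'from' -= 3 * 8 = 24
 *	good writes   = (LSM_STEP - 1) * 8 = 8   ->  n      -= 8
 *	'to'          -= 8 (single faulting address, per the rationale above)
 *
 * If the fault hits the 4th write, LSM_STEP reads back as 0 and the ADDZ
 * treats it as 4, so the same formulas still apply.
 */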
344
345 /*
346  *      optimized copying loop using RAPF when 32 bit aligned
347  *
348  *      n               will be automatically decremented inside the loop
349  *      ret             will be left intact. If an error occurs, we rewind
350  *                      so that the original non-optimized code fills in
351  *                      this value correctly.
352  *
353  *      on fault:
354  *              >       n will hold total number of uncopied bytes
355  *
356  *              >       {'to','from'} will be rewound so that
357  *                      the non-optimized code can do the proper fixup
358  *
359  *      DCACHE drops the cacheline which helps in reducing cache
360  *      pollution.
361  *
362  *      We introduce an extra SETD at the end of the loop to
363  *      ensure we don't fall off the loop before we catch all
364  *      errors.
365  *
366  *      NOTICE:
367  *              LSM_STEP in TXSTATUS must be cleared in the fixup code.
368  *              Since we're using M{S,G}ETD, a fault might happen at
369  *              any address in the middle of M{S,G}ETD, leaving
370  *              LSM_STEP with an incorrect value, which can make
371  *              subsequent uses of M{S,G}ET{L,D} go wrong.
372  *              i.e. if LSM_STEP was 1 when a fault occurred, the
373  *              next call to M{S,G}ET{L,D} would skip the first
374  *              copy/get as it thinks that the first one has already
375  *              been done.
376  *
377  */
378 #define __asm_copy_user_32bit_rapf_loop(                                \
379                         to,     from, ret, n, id, FIXUP)                \
380         asm volatile (                                                  \
381                         ".balign 8\n"                                   \
382                 "       MOV     RAPF, %1\n"                             \
383                 "       MSETL   [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
384                 "       MOV     D0Ar6, #0\n"                            \
385                 "       LSR     D1Ar5, %3, #6\n"                        \
386                 "       SUB     TXRPT, D1Ar5, #2\n"                     \
387                 "       MOV     RAPF, %1\n"                             \
388                 "$Lloop"id":\n"                                         \
389                 "       ADD     RAPF, %1, #64\n"                        \
390                 "21:    MGETD   D0FrT, D0.5, D0.6, D0.7, [%1++]\n"      \
391                 "22:    MSETD   [%0++], D0FrT, D0.5, D0.6, D0.7\n"      \
392                 "23:    SUB     %3, %3, #16\n"                          \
393                 "24:    MGETD   D0FrT, D0.5, D0.6, D0.7, [%1++]\n"      \
394                 "25:    MSETD   [%0++], D0FrT, D0.5, D0.6, D0.7\n"      \
395                 "26:    SUB     %3, %3, #16\n"                          \
396                 "27:    MGETD   D0FrT, D0.5, D0.6, D0.7, [%1++]\n"      \
397                 "28:    MSETD   [%0++], D0FrT, D0.5, D0.6, D0.7\n"      \
398                 "29:    SUB     %3, %3, #16\n"                          \
399                 "30:    MGETD   D0FrT, D0.5, D0.6, D0.7, [%1++]\n"      \
400                 "31:    MSETD   [%0++], D0FrT, D0.5, D0.6, D0.7\n"      \
401                 "32:    SUB     %3, %3, #16\n"                          \
402                 "       DCACHE  [%1+#-64], D0Ar6\n"                     \
403                 "       BR      $Lloop"id"\n"                           \
404                                                                         \
405                 "       MOV     RAPF, %1\n"                             \
406                 "33:    MGETD   D0FrT, D0.5, D0.6, D0.7, [%1++]\n"      \
407                 "34:    MSETD   [%0++], D0FrT, D0.5, D0.6, D0.7\n"      \
408                 "35:    SUB     %3, %3, #16\n"                          \
409                 "36:    MGETD   D0FrT, D0.5, D0.6, D0.7, [%1++]\n"      \
410                 "37:    MSETD   [%0++], D0FrT, D0.5, D0.6, D0.7\n"      \
411                 "38:    SUB     %3, %3, #16\n"                          \
412                 "39:    MGETD   D0FrT, D0.5, D0.6, D0.7, [%1++]\n"      \
413                 "40:    MSETD   [%0++], D0FrT, D0.5, D0.6, D0.7\n"      \
414                 "41:    SUB     %3, %3, #16\n"                          \
415                 "42:    MGETD   D0FrT, D0.5, D0.6, D0.7, [%1++]\n"      \
416                 "43:    MSETD   [%0++], D0FrT, D0.5, D0.6, D0.7\n"      \
417                 "44:    SETD    [%0+#-4], D0.7\n"                       \
418                 "       SUB     %3, %3, #16\n"                          \
419                 "1:     DCACHE  [%1+#-64], D0Ar6\n"                     \
420                 "       GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"           \
421                 "       GETL    D0FrT, D1RtP, [A0StP+#-32]\n"           \
422                 "       GETL    D0.5, D1.5, [A0StP+#-24]\n"             \
423                 "       GETL    D0.6, D1.6, [A0StP+#-16]\n"             \
424                 "       GETL    D0.7, D1.7, [A0StP+#-8]\n"              \
425                 "       SUB A0StP, A0StP, #40\n"                        \
426                 "       .section .fixup,\"ax\"\n"                       \
427                 "3:     MOV     D0Ar2, TXSTATUS\n"                      \
428                 "       MOV     D1Ar1, TXSTATUS\n"                      \
429                 "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            \
430                 "       MOV     TXSTATUS, D1Ar1\n"                      \
431                         FIXUP                                           \
432                 "       MOVT    D0Ar2, #HI(1b)\n"                       \
433                 "       JUMP    D0Ar2, #LO(1b)\n"                       \
434                 "       .previous\n"                                    \
435                 "       .section __ex_table,\"a\"\n"                    \
436                 "       .long 21b,3b\n"                                 \
437                 "       .long 22b,3b\n"                                 \
438                 "       .long 23b,3b\n"                                 \
439                 "       .long 24b,3b\n"                                 \
440                 "       .long 25b,3b\n"                                 \
441                 "       .long 26b,3b\n"                                 \
442                 "       .long 27b,3b\n"                                 \
443                 "       .long 28b,3b\n"                                 \
444                 "       .long 29b,3b\n"                                 \
445                 "       .long 30b,3b\n"                                 \
446                 "       .long 31b,3b\n"                                 \
447                 "       .long 32b,3b\n"                                 \
448                 "       .long 33b,3b\n"                                 \
449                 "       .long 34b,3b\n"                                 \
450                 "       .long 35b,3b\n"                                 \
451                 "       .long 36b,3b\n"                                 \
452                 "       .long 37b,3b\n"                                 \
453                 "       .long 38b,3b\n"                                 \
454                 "       .long 39b,3b\n"                                 \
455                 "       .long 40b,3b\n"                                 \
456                 "       .long 41b,3b\n"                                 \
457                 "       .long 42b,3b\n"                                 \
458                 "       .long 43b,3b\n"                                 \
459                 "       .long 44b,3b\n"                                 \
460                 "       .previous\n"                                    \
461                 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
462                 : "0" (to), "1" (from), "2" (ret), "3" (n)              \
463                 : "D1Ar1", "D0Ar2", "cc", "memory")
464
465 /*      rewind 'to' and 'from'  pointers when a fault occurs
466  *
467  *      Rationale:
468  *              A fault always occurs on writing to user buffer. A fault
469  *              is at a single address, so we need to rewind by only 4
470  *              bytes.
471  *              Since we do a complete read from the kernel buffer before
472  *              writing, we need to rewind it as well. The amount to be
473  *              rewound equals the number of faulty writes in MSETD,
474  *              which is: [4 - (LSM_STEP-1)]*4
475  *              LSM_STEP is bits 10:8 in TXSTATUS which is already read
476  *              and stored in D0Ar2
477  *
478  *              NOTE: If a fault occurs at the last operation in M{G,S}ETD
479  *                      LSM_STEP will be 0, i.e. we do 4 writes in our case; if
480  *                      a fault happens at the 4th write, LSM_STEP will be 0
481  *                      instead of 4. The code copes with that.
482  *
483  *              n is updated by the number of successful writes, which is:
484  *              n = n - (LSM_STEP-1)*4
485  */
486 #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
487         __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
488                 "LSR    D0Ar2, D0Ar2, #8\n"                             \
489                 "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
490                 "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
491                 "SUB    D0Ar2, D0Ar2, #1\n"                             \
492                 "MOV    D1Ar1, #4\n"                                    \
493                 "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
494                 "LSL    D0Ar2, D0Ar2, #2\n"                             \
495                 "LSL    D1Ar1, D1Ar1, #2\n"                             \
496                 "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
497                 "SUB    %0, %0, #4\n"                                   \
498                 "SUB    %1,     %1,     D0Ar2\n"                        \
499                 "SUB    %3, %3, D1Ar1\n")
500
501 unsigned long raw_copy_to_user(void __user *pdst, const void *psrc,
502                                unsigned long n)
503 {
504         register char __user *dst asm ("A0.2") = pdst;
505         register const char *src asm ("A1.2") = psrc;
506         unsigned long retn = 0;
507
508         if (n == 0)
509                 return 0;
510
511         if ((unsigned long) src & 1) {
512                 __asm_copy_to_user_1(dst, src, retn);
513                 n--;
514                 if (retn)
515                         return retn + n;
516         }
517         if ((unsigned long) dst & 1) {
518                 /* Worst case - byte copy */
519                 while (n > 0) {
520                         __asm_copy_to_user_1(dst, src, retn);
521                         n--;
522                         if (retn)
523                                 return retn + n;
524                 }
525         }
526         if (((unsigned long) src & 2) && n >= 2) {
527                 __asm_copy_to_user_2(dst, src, retn);
528                 n -= 2;
529                 if (retn)
530                         return retn + n;
531         }
532         if ((unsigned long) dst & 2) {
533                 /* Second worst case - word copy */
534                 while (n >= 2) {
535                         __asm_copy_to_user_2(dst, src, retn);
536                         n -= 2;
537                         if (retn)
538                                 return retn + n;
539                 }
540         }
541
542 #ifdef USE_RAPF
543         /* 64 bit copy loop */
544         if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
545                 if (n >= RAPF_MIN_BUF_SIZE) {
546                         /* copy user using 64 bit rapf copy */
547                         __asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
548                                                         n, "64cu");
549                 }
550                 while (n >= 8) {
551                         __asm_copy_to_user_8x64(dst, src, retn);
552                         n -= 8;
553                         if (retn)
554                                 return retn + n;
555                 }
556         }
557         if (n >= RAPF_MIN_BUF_SIZE) {
558                 /* copy user using 32 bit rapf copy */
559                 __asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
560         }
561 #else
562         /* 64 bit copy loop */
563         if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
564                 while (n >= 8) {
565                         __asm_copy_to_user_8x64(dst, src, retn);
566                         n -= 8;
567                         if (retn)
568                                 return retn + n;
569                 }
570         }
571 #endif
572
573         while (n >= 16) {
574                 __asm_copy_to_user_16(dst, src, retn);
575                 n -= 16;
576                 if (retn)
577                         return retn + n;
578         }
579
580         while (n >= 4) {
581                 __asm_copy_to_user_4(dst, src, retn);
582                 n -= 4;
583                 if (retn)
584                         return retn + n;
585         }
586
587         switch (n) {
588         case 0:
589                 break;
590         case 1:
591                 __asm_copy_to_user_1(dst, src, retn);
592                 break;
593         case 2:
594                 __asm_copy_to_user_2(dst, src, retn);
595                 break;
596         case 3:
597                 __asm_copy_to_user_3(dst, src, retn);
598                 break;
599         }
600
601         /*
602          * If we get here, retn correctly reflects the number of failing
603          * bytes.
604          */
605         return retn;
606 }
607 EXPORT_SYMBOL(raw_copy_to_user);
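/*
 * Editorial usage sketch (hypothetical helper, not part of the original
 * file): callers treat a non-zero return as "bytes left uncopied" and
 * typically turn it into -EFAULT.
 */
static long example_put_to_user(void __user *ubuf, const void *kbuf,
				unsigned long len)
{
	if (raw_copy_to_user(ubuf, kbuf, len))	/* returns bytes NOT copied */
		return -EFAULT;
	return 0;
}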
608
609 #define __asm_copy_from_user_1(to, from, ret) \
610         __asm_copy_user_cont(to, from, ret,     \
611                 "       GETB D1Ar1,[%1++]\n"    \
612                 "2:     SETB [%0++],D1Ar1\n",   \
613                 "3:     ADD  %2,%2,#1\n",       \
614                 "       .long 2b,3b\n")
615
616 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
617         __asm_copy_user_cont(to, from, ret,             \
618                 "       GETW D1Ar1,[%1++]\n"            \
619                 "2:     SETW [%0++],D1Ar1\n" COPY,      \
620                 "3:     ADD  %2,%2,#2\n" FIXUP,         \
621                 "       .long 2b,3b\n" TENTRY)
622
623 #define __asm_copy_from_user_2(to, from, ret) \
624         __asm_copy_from_user_2x_cont(to, from, ret, "", "", "")
625
626 #define __asm_copy_from_user_3(to, from, ret)           \
627         __asm_copy_from_user_2x_cont(to, from, ret,     \
628                 "       GETB D1Ar1,[%1++]\n"            \
629                 "4:     SETB [%0++],D1Ar1\n",           \
630                 "5:     ADD  %2,%2,#1\n",               \
631                 "       .long 4b,5b\n")
632
633 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
634         __asm_copy_user_cont(to, from, ret,             \
635                 "       GETD D1Ar1,[%1++]\n"            \
636                 "2:     SETD [%0++],D1Ar1\n" COPY,      \
637                 "3:     ADD  %2,%2,#4\n" FIXUP,         \
638                 "       .long 2b,3b\n" TENTRY)
639
640 #define __asm_copy_from_user_4(to, from, ret) \
641         __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
642
643 #define __asm_copy_from_user_8x64(to, from, ret) \
644         asm volatile (                          \
645                 "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
646                 "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
647                 "1:\n"                                  \
648                 "       .section .fixup,\"ax\"\n"       \
649                 "3:     ADD  %2,%2,#8\n"                \
650                 "       MOVT    D0Ar2,#HI(1b)\n"        \
651                 "       JUMP    D0Ar2,#LO(1b)\n"        \
652                 "       .previous\n"                    \
653                 "       .section __ex_table,\"a\"\n"    \
654                 "       .long 2b,3b\n"                  \
655                 "       .previous\n"                    \
656                 : "=a" (to), "=r" (from), "=r" (ret)    \
657                 : "0" (to), "1" (from), "2" (ret)       \
658                 : "D1Ar1", "D0Ar2", "memory")
659
660 /*      rewind 'from' pointer when a fault occurs
661  *
662  *      Rationale:
663  *              A fault occurs while reading from user buffer, which is the
664  *              source.
665  *              Since nothing is written to the kernel buffer until the
666  *              corresponding read has completed, the kernel buffer is in the
667  *              right state and needn't be corrected, but the source must be
668  *              rewound to the beginning of the block, which is LSM_STEP*8 bytes.
669  *              LSM_STEP is bits 10:8 in TXSTATUS which is already read
670  *              and stored in D0Ar2
671  *
672  *              NOTE: If a fault occurs at the last operation in M{G,S}ETL
673  *                      LSM_STEP will be 0, i.e. we do 4 reads in our case; if
674  *                      a fault happens at the 4th read, LSM_STEP will be 0
675  *                      instead of 4. The code copes with that.
676  */
677 #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)      \
678         __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
679                 "LSR    D0Ar2, D0Ar2, #5\n"                             \
680                 "ANDS   D0Ar2, D0Ar2, #0x38\n"                          \
681                 "ADDZ   D0Ar2, D0Ar2, #32\n"                            \
682                 "SUB    %1, %1, D0Ar2\n")
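/*
 * Editorial note on the bit manipulation above (not original text): LSM_STEP
 * lives in TXSTATUS bits 10:8.  LSR #5 moves it to bits 5:3, i.e. multiplies
 * it by 8, the ANDS with 0x38 masks everything else off, and ADDZ supplies 32
 * when the field reads 0 (fault on the last of the four transfers).  So e.g.
 * LSM_STEP == 3 rewinds 'from' by 24 bytes, LSM_STEP == 0 by 32 bytes.
 */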
683
684 /*      rewind 'from' pointer when a fault occurs
685  *
686  *      Rationale:
687  *              A fault occurs while reading from user buffer, which is the
688  *              source.
689  *              Since nothing is written to the kernel buffer until the
690  *              corresponding read has completed, the kernel buffer is in the
691  *              right state and needn't be corrected, but the source must be
692  *              rewound to the beginning of the block, which is LSM_STEP*4 bytes.
693  *              LSM_STEP is bits 10:8 in TXSTATUS which is already read
694  *              and stored in D0Ar2
695  *
696  *              NOTE: If a fault occurs at the last operation in M{G,S}ETD
697  *                      LSM_STEP will be 0, i.e. we do 4 reads in our case; if
698  *                      a fault happens at the 4th read, LSM_STEP will be 0
699  *                      instead of 4. The code copes with that.
700  */
701 #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)      \
702         __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
703                 "LSR    D0Ar2, D0Ar2, #6\n"                             \
704                 "ANDS   D0Ar2, D0Ar2, #0x1c\n"                          \
705                 "ADDZ   D0Ar2, D0Ar2, #16\n"                            \
706                 "SUB    %1, %1, D0Ar2\n")
707
708
709 /*
710  * Copy from user to kernel. The return value is the number of bytes that were
711  * inaccessible.
712  */
713 unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
714                                  unsigned long n)
715 {
716         register char *dst asm ("A0.2") = pdst;
717         register const char __user *src asm ("A1.2") = psrc;
718         unsigned long retn = 0;
719
720         if (n == 0)
721                 return 0;
722
723         if ((unsigned long) src & 1) {
724                 __asm_copy_from_user_1(dst, src, retn);
725                 n--;
726                 if (retn)
727                         return retn + n;
728         }
729         if ((unsigned long) dst & 1) {
730                 /* Worst case - byte copy */
731                 while (n > 0) {
732                         __asm_copy_from_user_1(dst, src, retn);
733                         n--;
734                         if (retn)
735                                 return retn + n;
736                 }
737         }
738         if (((unsigned long) src & 2) && n >= 2) {
739                 __asm_copy_from_user_2(dst, src, retn);
740                 n -= 2;
741                 if (retn)
742                         return retn + n;
743         }
744         if ((unsigned long) dst & 2) {
745                 /* Second worst case - word copy */
746                 while (n >= 2) {
747                         __asm_copy_from_user_2(dst, src, retn);
748                         n -= 2;
749                         if (retn)
750                                 return retn + n;
751                 }
752         }
753
754 #ifdef USE_RAPF
755         /* 64 bit copy loop */
756         if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
757                 if (n >= RAPF_MIN_BUF_SIZE) {
758                         /* Copy using fast 64bit rapf */
759                         __asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
760                                                         n, "64cuz");
761                 }
762                 while (n >= 8) {
763                         __asm_copy_from_user_8x64(dst, src, retn);
764                         n -= 8;
765                         if (retn)
766                                 return retn + n;
767                 }
768         }
769
770         if (n >= RAPF_MIN_BUF_SIZE) {
771                 /* Copy using fast 32bit rapf */
772                 __asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
773                                                 n, "32cuz");
774         }
775 #else
776         /* 64 bit copy loop */
777         if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
778                 while (n >= 8) {
779                         __asm_copy_from_user_8x64(dst, src, retn);
780                         n -= 8;
781                         if (retn)
782                                 return retn + n;
783                 }
784         }
785 #endif
786
787         while (n >= 4) {
788                 __asm_copy_from_user_4(dst, src, retn);
789                 n -= 4;
790
791                 if (retn)
792                         return retn + n;
793         }
794
795         /* If we get here, there were no memory read faults.  */
796         switch (n) {
797                 /* These copies are at least "naturally aligned" (so we don't
798                    have to check each byte), due to the src alignment code.
799                    The *_3 case *will* get the correct count for retn.  */
800         case 0:
801                 /* This case is deliberately left in (if you have doubts, check
802                    the generated assembly code).  */
803                 break;
804         case 1:
805                 __asm_copy_from_user_1(dst, src, retn);
806                 break;
807         case 2:
808                 __asm_copy_from_user_2(dst, src, retn);
809                 break;
810         case 3:
811                 __asm_copy_from_user_3(dst, src, retn);
812                 break;
813         }
814
815         /* If we get here, retn correctly reflects the number of failing
816            bytes.  */
817         return retn;
818 }
819 EXPORT_SYMBOL(raw_copy_from_user);
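/*
 * Editorial sketch (hypothetical wrapper, not part of the original file):
 * raw_copy_from_user() leaves the uncopied tail of the kernel buffer
 * untouched; the generic copy_from_user() additionally zeroes that tail, and
 * a caller wanting the same behaviour here would do it explicitly:
 */
static unsigned long example_copy_from_user_zeroing(void *to,
						    const void __user *from,
						    unsigned long n)
{
	unsigned long left = raw_copy_from_user(to, from, n);

	if (left)
		memset(to + (n - left), 0, left);	/* zero what was not copied */
	return left;
}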
820
821 #define __asm_clear_8x64(to, ret) \
822         asm volatile (                                  \
823                 "       MOV  D0Ar2,#0\n"                \
824                 "       MOV  D1Ar1,#0\n"                \
825                 "       SETL [%0],D0Ar2,D1Ar1\n"        \
826                 "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
827                 "1:\n"                                  \
828                 "       .section .fixup,\"ax\"\n"       \
829                 "3:     ADD  %1,%1,#8\n"                \
830                 "       MOVT    D0Ar2,#HI(1b)\n"        \
831                 "       JUMP    D0Ar2,#LO(1b)\n"        \
832                 "       .previous\n"                    \
833                 "       .section __ex_table,\"a\"\n"    \
834                 "       .long 2b,3b\n"                  \
835                 "       .previous\n"                    \
836                 : "=r" (to), "=r" (ret) \
837                 : "0" (to), "1" (ret)   \
838                 : "D1Ar1", "D0Ar2", "memory")
839
840 /* Zero userspace.  */
841
842 #define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
843         asm volatile (                                  \
844                 "       MOV D1Ar1,#0\n"                 \
845                         CLEAR                           \
846                 "1:\n"                                  \
847                 "       .section .fixup,\"ax\"\n"       \
848                         FIXUP                           \
849                 "       MOVT    D1Ar1,#HI(1b)\n"        \
850                 "       JUMP    D1Ar1,#LO(1b)\n"        \
851                 "       .previous\n"                    \
852                 "       .section __ex_table,\"a\"\n"    \
853                         TENTRY                          \
854                 "       .previous"                      \
855                 : "=r" (to), "=r" (ret)                 \
856                 : "0" (to), "1" (ret)                   \
857                 : "D1Ar1", "memory")
858
859 #define __asm_clear_1(to, ret) \
860         __asm_clear(to, ret,                    \
861                 "       SETB [%0],D1Ar1\n"      \
862                 "2:     SETB [%0++],D1Ar1\n",   \
863                 "3:     ADD  %1,%1,#1\n",       \
864                 "       .long 2b,3b\n")
865
866 #define __asm_clear_2(to, ret) \
867         __asm_clear(to, ret,                    \
868                 "       SETW [%0],D1Ar1\n"      \
869                 "2:     SETW [%0++],D1Ar1\n",   \
870                 "3:     ADD  %1,%1,#2\n",       \
871                 "       .long 2b,3b\n")
872
873 #define __asm_clear_3(to, ret) \
874         __asm_clear(to, ret,                    \
875                  "2:    SETW [%0++],D1Ar1\n"    \
876                  "      SETB [%0],D1Ar1\n"      \
877                  "3:    SETB [%0++],D1Ar1\n",   \
878                  "4:    ADD  %1,%1,#2\n"        \
879                  "5:    ADD  %1,%1,#1\n",       \
880                  "      .long 2b,4b\n"          \
881                  "      .long 3b,5b\n")
882
883 #define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
884         __asm_clear(to, ret,                            \
885                 "       SETD [%0],D1Ar1\n"              \
886                 "2:     SETD [%0++],D1Ar1\n" CLEAR,     \
887                 "3:     ADD  %1,%1,#4\n" FIXUP,         \
888                 "       .long 2b,3b\n" TENTRY)
889
890 #define __asm_clear_4(to, ret) \
891         __asm_clear_4x_cont(to, ret, "", "", "")
892
893 #define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
894         __asm_clear_4x_cont(to, ret,                    \
895                 "       SETD [%0],D1Ar1\n"              \
896                 "4:     SETD [%0++],D1Ar1\n" CLEAR,     \
897                 "5:     ADD  %1,%1,#4\n" FIXUP,         \
898                 "       .long 4b,5b\n" TENTRY)
899
900 #define __asm_clear_8(to, ret) \
901         __asm_clear_8x_cont(to, ret, "", "", "")
902
903 #define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
904         __asm_clear_8x_cont(to, ret,                    \
905                 "       SETD [%0],D1Ar1\n"              \
906                 "6:     SETD [%0++],D1Ar1\n" CLEAR,     \
907                 "7:     ADD  %1,%1,#4\n" FIXUP,         \
908                 "       .long 6b,7b\n" TENTRY)
909
910 #define __asm_clear_12(to, ret) \
911         __asm_clear_12x_cont(to, ret, "", "", "")
912
913 #define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
914         __asm_clear_12x_cont(to, ret,                   \
915                 "       SETD [%0],D1Ar1\n"              \
916                 "8:     SETD [%0++],D1Ar1\n" CLEAR,     \
917                 "9:     ADD  %1,%1,#4\n" FIXUP,         \
918                 "       .long 8b,9b\n" TENTRY)
919
920 #define __asm_clear_16(to, ret) \
921         __asm_clear_16x_cont(to, ret, "", "", "")
922
923 unsigned long __do_clear_user(void __user *pto, unsigned long pn)
924 {
925         register char __user *dst asm ("D0Re0") = pto;
926         register unsigned long n asm ("D1Re0") = pn;
927         register unsigned long retn asm ("D0Ar6") = 0;
928
929         if ((unsigned long) dst & 1) {
930                 __asm_clear_1(dst, retn);
931                 n--;
932         }
933
934         if ((unsigned long) dst & 2) {
935                 __asm_clear_2(dst, retn);
936                 n -= 2;
937         }
938
939         /* 64 bit copy loop */
940         if (!((__force unsigned long) dst & 7)) {
941                 while (n >= 8) {
942                         __asm_clear_8x64(dst, retn);
943                         n -= 8;
944                 }
945         }
946
947         while (n >= 16) {
948                 __asm_clear_16(dst, retn);
949                 n -= 16;
950         }
951
952         while (n >= 4) {
953                 __asm_clear_4(dst, retn);
954                 n -= 4;
955         }
956
957         switch (n) {
958         case 0:
959                 break;
960         case 1:
961                 __asm_clear_1(dst, retn);
962                 break;
963         case 2:
964                 __asm_clear_2(dst, retn);
965                 break;
966         case 3:
967                 __asm_clear_3(dst, retn);
968                 break;
969         }
970
971         return retn;
972 }
973 EXPORT_SYMBOL(__do_clear_user);
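/*
 * Editorial usage sketch (hypothetical caller, not part of the original
 * file): __do_clear_user() returns the number of bytes that could not be
 * zeroed, mirroring the usual clear_user() convention.
 */
static long example_zero_user_range(void __user *p, unsigned long len)
{
	if (__do_clear_user(p, len))	/* non-zero => part of the range faulted */
		return -EFAULT;
	return 0;
}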
974
975 unsigned char __get_user_asm_b(const void __user *addr, long *err)
976 {
977         register unsigned char x asm ("D0Re0") = 0;
978         asm volatile (
979                 "       GETB %0,[%2]\n"
980                 "1:\n"
981                 "       GETB %0,[%2]\n"
982                 "2:\n"
983                 "       .section .fixup,\"ax\"\n"
984                 "3:     MOV     D0FrT,%3\n"
985                 "       SETD    [%1],D0FrT\n"
986                 "       MOVT    D0FrT,#HI(2b)\n"
987                 "       JUMP    D0FrT,#LO(2b)\n"
988                 "       .previous\n"
989                 "       .section __ex_table,\"a\"\n"
990                 "       .long 1b,3b\n"
991                 "       .previous\n"
992                 : "=r" (x)
993                 : "r" (err), "r" (addr), "P" (-EFAULT)
994                 : "D0FrT");
995         return x;
996 }
997 EXPORT_SYMBOL(__get_user_asm_b);
998
999 unsigned short __get_user_asm_w(const void __user *addr, long *err)
1000 {
1001         register unsigned short x asm ("D0Re0") = 0;
1002         asm volatile (
1003                 "       GETW %0,[%2]\n"
1004                 "1:\n"
1005                 "       GETW %0,[%2]\n"
1006                 "2:\n"
1007                 "       .section .fixup,\"ax\"\n"
1008                 "3:     MOV     D0FrT,%3\n"
1009                 "       SETD    [%1],D0FrT\n"
1010                 "       MOVT    D0FrT,#HI(2b)\n"
1011                 "       JUMP    D0FrT,#LO(2b)\n"
1012                 "       .previous\n"
1013                 "       .section __ex_table,\"a\"\n"
1014                 "       .long 1b,3b\n"
1015                 "       .previous\n"
1016                 : "=r" (x)
1017                 : "r" (err), "r" (addr), "P" (-EFAULT)
1018                 : "D0FrT");
1019         return x;
1020 }
1021 EXPORT_SYMBOL(__get_user_asm_w);
1022
1023 unsigned int __get_user_asm_d(const void __user *addr, long *err)
1024 {
1025         register unsigned int x asm ("D0Re0") = 0;
1026         asm volatile (
1027                 "       GETD %0,[%2]\n"
1028                 "1:\n"
1029                 "       GETD %0,[%2]\n"
1030                 "2:\n"
1031                 "       .section .fixup,\"ax\"\n"
1032                 "3:     MOV     D0FrT,%3\n"
1033                 "       SETD    [%1],D0FrT\n"
1034                 "       MOVT    D0FrT,#HI(2b)\n"
1035                 "       JUMP    D0FrT,#LO(2b)\n"
1036                 "       .previous\n"
1037                 "       .section __ex_table,\"a\"\n"
1038                 "       .long 1b,3b\n"
1039                 "       .previous\n"
1040                 : "=r" (x)
1041                 : "r" (err), "r" (addr), "P" (-EFAULT)
1042                 : "D0FrT");
1043         return x;
1044 }
1045 EXPORT_SYMBOL(__get_user_asm_d);
1046
1047 unsigned long long __get_user_asm_l(const void __user *addr, long *err)
1048 {
1049         register unsigned long long x asm ("D0Re0") = 0;
1050         asm volatile (
1051                 "       GETL %0,%t0,[%2]\n"
1052                 "1:\n"
1053                 "       GETL %0,%t0,[%2]\n"
1054                 "2:\n"
1055                 "       .section .fixup,\"ax\"\n"
1056                 "3:     MOV     D0FrT,%3\n"
1057                 "       SETD    [%1],D0FrT\n"
1058                 "       MOVT    D0FrT,#HI(2b)\n"
1059                 "       JUMP    D0FrT,#LO(2b)\n"
1060                 "       .previous\n"
1061                 "       .section __ex_table,\"a\"\n"
1062                 "       .long 1b,3b\n"
1063                 "       .previous\n"
1064                 : "=r" (x)
1065                 : "r" (err), "r" (addr), "P" (-EFAULT)
1066                 : "D0FrT");
1067         return x;
1068 }
1069 EXPORT_SYMBOL(__get_user_asm_l);
1070
1071 long __put_user_asm_b(unsigned int x, void __user *addr)
1072 {
1073         register unsigned int err asm ("D0Re0") = 0;
1074         asm volatile (
1075                 "       MOV  %0,#0\n"
1076                 "       SETB [%2],%1\n"
1077                 "1:\n"
1078                 "       SETB [%2],%1\n"
1079                 "2:\n"
1080                 ".section .fixup,\"ax\"\n"
1081                 "3:     MOV     %0,%3\n"
1082                 "       MOVT    D0FrT,#HI(2b)\n"
1083                 "       JUMP    D0FrT,#LO(2b)\n"
1084                 ".previous\n"
1085                 ".section __ex_table,\"a\"\n"
1086                 "       .long 1b,3b\n"
1087                 ".previous"
1088                 : "=r"(err)
1089                 : "d" (x), "a" (addr), "P"(-EFAULT)
1090                 : "D0FrT");
1091         return err;
1092 }
1093 EXPORT_SYMBOL(__put_user_asm_b);
1094
1095 long __put_user_asm_w(unsigned int x, void __user *addr)
1096 {
1097         register unsigned int err asm ("D0Re0") = 0;
1098         asm volatile (
1099                 "       MOV  %0,#0\n"
1100                 "       SETW [%2],%1\n"
1101                 "1:\n"
1102                 "       SETW [%2],%1\n"
1103                 "2:\n"
1104                 ".section .fixup,\"ax\"\n"
1105                 "3:     MOV     %0,%3\n"
1106                 "       MOVT    D0FrT,#HI(2b)\n"
1107                 "       JUMP    D0FrT,#LO(2b)\n"
1108                 ".previous\n"
1109                 ".section __ex_table,\"a\"\n"
1110                 "       .long 1b,3b\n"
1111                 ".previous"
1112                 : "=r"(err)
1113                 : "d" (x), "a" (addr), "P"(-EFAULT)
1114                 : "D0FrT");
1115         return err;
1116 }
1117 EXPORT_SYMBOL(__put_user_asm_w);
1118
1119 long __put_user_asm_d(unsigned int x, void __user *addr)
1120 {
1121         register unsigned int err asm ("D0Re0") = 0;
1122         asm volatile (
1123                 "       MOV  %0,#0\n"
1124                 "       SETD [%2],%1\n"
1125                 "1:\n"
1126                 "       SETD [%2],%1\n"
1127                 "2:\n"
1128                 ".section .fixup,\"ax\"\n"
1129                 "3:     MOV     %0,%3\n"
1130                 "       MOVT    D0FrT,#HI(2b)\n"
1131                 "       JUMP    D0FrT,#LO(2b)\n"
1132                 ".previous\n"
1133                 ".section __ex_table,\"a\"\n"
1134                 "       .long 1b,3b\n"
1135                 ".previous"
1136                 : "=r"(err)
1137                 : "d" (x), "a" (addr), "P"(-EFAULT)
1138                 : "D0FrT");
1139         return err;
1140 }
1141 EXPORT_SYMBOL(__put_user_asm_d);
1142
1143 long __put_user_asm_l(unsigned long long x, void __user *addr)
1144 {
1145         register unsigned int err asm ("D0Re0") = 0;
1146         asm volatile (
1147                 "       MOV  %0,#0\n"
1148                 "       SETL [%2],%1,%t1\n"
1149                 "1:\n"
1150                 "       SETL [%2],%1,%t1\n"
1151                 "2:\n"
1152                 ".section .fixup,\"ax\"\n"
1153                 "3:     MOV     %0,%3\n"
1154                 "       MOVT    D0FrT,#HI(2b)\n"
1155                 "       JUMP    D0FrT,#LO(2b)\n"
1156                 ".previous\n"
1157                 ".section __ex_table,\"a\"\n"
1158                 "       .long 1b,3b\n"
1159                 ".previous"
1160                 : "=r"(err)
1161                 : "d" (x), "a" (addr), "P"(-EFAULT)
1162                 : "D0FrT");
1163         return err;
1164 }
1165 EXPORT_SYMBOL(__put_user_asm_l);
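/*
 * Editorial note (not part of the original file): the __get_user_asm_* and
 * __put_user_asm_* helpers above are the width-specific back ends behind the
 * arch's get_user()/put_user() macros, e.g. a hypothetical caller:
 */
static long example_read_u32(const u32 __user *uptr, u32 *out)
{
	u32 val;

	if (get_user(val, uptr))	/* picks the 4-byte helper for a u32 */
		return -EFAULT;
	*out = val;
	return 0;
}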
1166
1167 long strnlen_user(const char __user *src, long count)
1168 {
1169         long res;
1170
1171         if (!access_ok(VERIFY_READ, src, 0))
1172                 return 0;
1173
1174         asm volatile (" MOV     D0Ar4, %1\n"
1175                       " MOV     D0Ar6, %2\n"
1176                       "0:\n"
1177                       " SUBS    D0FrT, D0Ar6, #0\n"
1178                       " SUB     D0Ar6, D0Ar6, #1\n"
1179                       " BLE     2f\n"
1180                       " GETB    D0FrT, [D0Ar4+#1++]\n"
1181                       "1:\n"
1182                       " TST     D0FrT, #255\n"
1183                       " BNE     0b\n"
1184                       "2:\n"
1185                       " SUB     %0, %2, D0Ar6\n"
1186                       "3:\n"
1187                       " .section .fixup,\"ax\"\n"
1188                       "4:\n"
1189                       " MOV     %0, #0\n"
1190                       " MOVT    D0FrT,#HI(3b)\n"
1191                       " JUMP    D0FrT,#LO(3b)\n"
1192                       " .previous\n"
1193                       " .section __ex_table,\"a\"\n"
1194                       " .long 1b,4b\n"
1195                       " .previous\n"
1196                       : "=r" (res)
1197                       : "r" (src), "r" (count)
1198                       : "D0FrT", "D0Ar4", "D0Ar6", "cc");
1199
1200         return res;
1201 }
1202 EXPORT_SYMBOL(strnlen_user);
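/*
 * Editorial usage sketch (hypothetical caller, not part of the original
 * file): strnlen_user() counts the terminating NUL and returns 0 when the
 * string is not accessible.
 */
static long example_user_string_size(const char __user *ustr, long max)
{
	long len = strnlen_user(ustr, max);	/* length including NUL, 0 on fault */

	return len ? len : -EFAULT;
}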
1203
1204 long __strncpy_from_user(char *dst, const char __user *src, long count)
1205 {
1206         long res;
1207
1208         if (count == 0)
1209                 return 0;
1210
1211         /*
1212          * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
1213          *  So do we.
1214          *
1215          *  This code is deduced from:
1216          *
1217          *      char tmp2;
1218          *      long tmp1, tmp3;
1219          *      tmp1 = count;
1220          *      while ((*dst++ = (tmp2 = *src++)) != 0
1221          *             && --tmp1)
1222          *        ;
1223          *
1224          *      res = count - tmp1;
1225          *
1226          *  with tweaks.
1227          */
1228
1229         asm volatile (" MOV  %0,%3\n"
1230                       "1:\n"
1231                       " GETB D0FrT,[%2++]\n"
1232                       "2:\n"
1233                       " CMP  D0FrT,#0\n"
1234                       " SETB [%1++],D0FrT\n"
1235                       " BEQ  3f\n"
1236                       " SUBS %0,%0,#1\n"
1237                       " BNZ  1b\n"
1238                       "3:\n"
1239                       " SUB  %0,%3,%0\n"
1240                       "4:\n"
1241                       " .section .fixup,\"ax\"\n"
1242                       "5:\n"
1243                       " MOV  %0,%7\n"
1244                       " MOVT    D0FrT,#HI(4b)\n"
1245                       " JUMP    D0FrT,#LO(4b)\n"
1246                       " .previous\n"
1247                       " .section __ex_table,\"a\"\n"
1248                       " .long 2b,5b\n"
1249                       " .previous"
1250                       : "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
1251                       : "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
1252                       : "D0FrT", "memory", "cc");
1253
1254         return res;
1255 }
1256 EXPORT_SYMBOL(__strncpy_from_user);