/*
 * User address space access functions.
 * The non-inlined parts of asm-metag/uaccess.h are here.
 *
 * Copyright (C) 2006, Imagination Technologies.
 * Copyright (C) 2000, Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 * Modified for Meta by Will Newton.
 */

#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/cache.h>			/* def of L1_CACHE_BYTES */

#define USE_RAPF
#define RAPF_MIN_BUF_SIZE	(3*L1_CACHE_BYTES)
/* The "double write" in this code is because the Meta will not fault
 * immediately unless the memory pipe is forced to by e.g. a data stall or
 * another memory op. The second write should be discarded by the write
 * combiner so should have virtually no cost.
 */

#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	asm volatile ( \
		COPY \
		"1:\n" \
		"	.section .fixup,\"ax\"\n" \
		FIXUP \
		"	MOVT    D1Ar1,#HI(1b)\n" \
		"	JUMP    D1Ar1,#LO(1b)\n" \
		"	.previous\n" \
		"	.section __ex_table,\"a\"\n" \
		TENTRY \
		"	.previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "D1Ar1", "memory")
#define __asm_copy_to_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		"	GETB D1Ar1,[%1++]\n" \
		"	SETB [%0],D1Ar1\n" \
		"2:	SETB [%0++],D1Ar1\n", \
		"3:	ADD  %2,%2,#1\n", \
		"	.long 2b,3b\n")

#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		"	GETW D1Ar1,[%1++]\n" \
		"	SETW [%0],D1Ar1\n" \
		"2:	SETW [%0++],D1Ar1\n" COPY, \
		"3:	ADD  %2,%2,#2\n" FIXUP, \
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, \
		"	GETB D1Ar1,[%1++]\n" \
		"	SETB [%0],D1Ar1\n" \
		"4:	SETB [%0++],D1Ar1\n", \
		"5:	ADD  %2,%2,#1\n", \
		"	.long 4b,5b\n")
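/*
 * How the *_cont chaining composes (editor's illustration, not used by
 * the code): __asm_copy_to_user_3 above expands to a single asm block
 * whose COPY, FIXUP and TENTRY strings are concatenations of the 2-byte
 * and 1-byte steps, in order:
 *
 *	COPY:   GETW/SETW pair (labels 2/3), then GETB/SETB pair (4/5)
 *	FIXUP:  "3: ADD %2,%2,#2", then "5: ADD %2,%2,#1"
 *	TENTRY: ".long 2b,3b", then ".long 4b,5b"
 *
 * so every faulting SETx has an __ex_table entry pointing at the ADD
 * that accounts for the bytes it failed to copy.
 */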
#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		"	GETD D1Ar1,[%1++]\n" \
		"	SETD [%0],D1Ar1\n" \
		"2:	SETD [%0++],D1Ar1\n" COPY, \
		"3:	ADD  %2,%2,#4\n" FIXUP, \
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		"	GETB D1Ar1,[%1++]\n" \
		"	SETB [%0],D1Ar1\n" \
		"4:	SETB [%0++],D1Ar1\n", \
		"5:	ADD  %2,%2,#1\n", \
		"	.long 4b,5b\n")

#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		"	GETW D1Ar1,[%1++]\n" \
		"	SETW [%0],D1Ar1\n" \
		"4:	SETW [%0++],D1Ar1\n" COPY, \
		"5:	ADD  %2,%2,#2\n" FIXUP, \
		"	.long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, \
		"	GETB D1Ar1,[%1++]\n" \
		"	SETB [%0],D1Ar1\n" \
		"6:	SETB [%0++],D1Ar1\n", \
		"7:	ADD  %2,%2,#1\n", \
		"	.long 6b,7b\n")

#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		"	GETD D1Ar1,[%1++]\n" \
		"	SETD [%0],D1Ar1\n" \
		"4:	SETD [%0++],D1Ar1\n" COPY, \
		"5:	ADD  %2,%2,#4\n" FIXUP, \
		"	.long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		"	GETB D1Ar1,[%1++]\n" \
		"	SETB [%0],D1Ar1\n" \
		"6:	SETB [%0++],D1Ar1\n", \
		"7:	ADD  %2,%2,#1\n", \
		"	.long 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		"	GETW D1Ar1,[%1++]\n" \
		"	SETW [%0],D1Ar1\n" \
		"6:	SETW [%0++],D1Ar1\n" COPY, \
		"7:	ADD  %2,%2,#2\n" FIXUP, \
		"	.long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, \
		"	GETB D1Ar1,[%1++]\n" \
		"	SETB [%0],D1Ar1\n" \
		"8:	SETB [%0++],D1Ar1\n", \
		"9:	ADD  %2,%2,#1\n", \
		"	.long 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		"	GETD D1Ar1,[%1++]\n" \
		"	SETD [%0],D1Ar1\n" \
		"6:	SETD [%0++],D1Ar1\n" COPY, \
		"7:	ADD  %2,%2,#4\n" FIXUP, \
		"	.long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_12(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		"	GETB D1Ar1,[%1++]\n" \
		"	SETB [%0],D1Ar1\n" \
		"8:	SETB [%0++],D1Ar1\n", \
		"9:	ADD  %2,%2,#1\n", \
		"	.long 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		"	GETW D1Ar1,[%1++]\n" \
		"	SETW [%0],D1Ar1\n" \
		"8:	SETW [%0++],D1Ar1\n" COPY, \
		"9:	ADD  %2,%2,#2\n" FIXUP, \
		"	.long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, \
		"	GETB D1Ar1,[%1++]\n" \
		"	SETB [%0],D1Ar1\n" \
		"10:	SETB [%0++],D1Ar1\n", \
		"11:	ADD  %2,%2,#1\n", \
		"	.long 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		"	GETD D1Ar1,[%1++]\n" \
		"	SETD [%0],D1Ar1\n" \
		"8:	SETD [%0++],D1Ar1\n" COPY, \
		"9:	ADD  %2,%2,#4\n" FIXUP, \
		"	.long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
	__asm_copy_to_user_16x_cont(to, from, ret, "", "", "")
#define __asm_copy_to_user_8x64(to, from, ret) \
	asm volatile ( \
		"	GETL D0Ar2,D1Ar1,[%1++]\n" \
		"	SETL [%0],D0Ar2,D1Ar1\n" \
		"2:	SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		"	.section .fixup,\"ax\"\n" \
		"3:	ADD  %2,%2,#8\n" \
		"	MOVT    D0Ar2,#HI(1b)\n" \
		"	JUMP    D0Ar2,#LO(1b)\n" \
		"	.previous\n" \
		"	.section __ex_table,\"a\"\n" \
		"	.long 2b,3b\n" \
		"	.previous\n" \
		: "=r" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "D1Ar1", "D0Ar2", "memory")
/*
 *	optimized copying loop using RAPF when 64 bit aligned
 *
 *	n will be automatically decremented inside the loop.
 *	ret will be left intact: if an error occurs we rewind
 *	so that the original non-optimized code fills in this
 *	value correctly.
 *
 *	on fault:
 *		> n will hold the total number of uncopied bytes
 *
 *		> {'to','from'} will be rewound so that
 *			the non-optimized code will do the proper fix up
 *
 *	DCACHE drops the cacheline, which helps in reducing cache
 *	pollution.
 *
 *	We introduce an extra SETL at the end of the loop to
 *	ensure we don't fall off the loop before we catch all
 *	errors.
 *
 *	NOTICE:
 *		LSM_STEP in TXSTATUS must be cleared in the fix up code.
 *		Since we're using M{S,G}ETL, a fault might happen at
 *		any address in the middle of M{S,G}ETL, causing
 *		the value of LSM_STEP to be incorrect, which can
 *		cause subsequent use of M{S,G}ET{L,D} to go wrong:
 *		i.e. if LSM_STEP was 1 when a fault occurred, the
 *		next call to M{S,G}ET{L,D} will skip the first
 *		copy/get as it thinks the first one has already
 *		been done.
 */
#define __asm_copy_user_64bit_rapf_loop(				\
		to, from, ret, n, id, FIXUP)				\
	asm volatile (							\
		".balign 8\n"						\
		"	MOV	RAPF, %1\n"				\
		"	MSETL	[A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
		"	MOV	D0Ar6, #0\n"				\
		"	LSR	D1Ar5, %3, #6\n"			\
		"	SUB	TXRPT, D1Ar5, #2\n"			\
		"	MOV	RAPF, %1\n"				\
		"$Lloop"id":\n"						\
		"	ADD	RAPF, %1, #64\n"			\
		"21:	MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"22:	MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"23:	SUB	%3, %3, #32\n"				\
		"24:	MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"25:	MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"26:	SUB	%3, %3, #32\n"				\
		"	DCACHE	[%1+#-64], D0Ar6\n"			\
		"	BR	$Lloop"id"\n"				\
									\
		"	MOV	RAPF, %1\n"				\
		"27:	MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"28:	MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"29:	SUB	%3, %3, #32\n"				\
		"30:	MGETL	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"31:	MSETL	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"32:	SETL	[%0+#-8], D0.7, D1.7\n"			\
		"	SUB	%3, %3, #32\n"				\
		"1:	DCACHE	[%1+#-64], D0Ar6\n"			\
		"	GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"		\
		"	GETL    D0FrT, D1RtP, [A0StP+#-32]\n"		\
		"	GETL    D0.5, D1.5, [A0StP+#-24]\n"		\
		"	GETL    D0.6, D1.6, [A0StP+#-16]\n"		\
		"	GETL    D0.7, D1.7, [A0StP+#-8]\n"		\
		"	SUB A0StP, A0StP, #40\n"			\
		"	.section .fixup,\"ax\"\n"			\
		"3:	MOV	D0Ar2, TXSTATUS\n"			\
		"	MOV	D1Ar1, TXSTATUS\n"			\
		"	AND	D1Ar1, D1Ar1, #0xFFFFF8FF\n"		\
		"	MOV	TXSTATUS, D1Ar1\n"			\
			FIXUP						\
		"	MOVT    D0Ar2, #HI(1b)\n"			\
		"	JUMP    D0Ar2, #LO(1b)\n"			\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.long 21b,3b\n"					\
		"	.long 22b,3b\n"					\
		"	.long 23b,3b\n"					\
		"	.long 24b,3b\n"					\
		"	.long 25b,3b\n"					\
		"	.long 26b,3b\n"					\
		"	.long 27b,3b\n"					\
		"	.long 28b,3b\n"					\
		"	.long 29b,3b\n"					\
		"	.long 30b,3b\n"					\
		"	.long 31b,3b\n"					\
		"	.long 32b,3b\n"					\
		"	.previous\n"					\
		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)		\
		: "0" (to), "1" (from), "2" (ret), "3" (n)		\
		: "D1Ar1", "D0Ar2", "cc", "memory")
/*	rewind 'to' and 'from' pointers when a fault occurs
 *
 *	Rationale:
 *		A fault always occurs on writing to the user buffer. A fault
 *		is at a single address, so we need to rewind 'to' by only 8
 *		bytes.
 *		Since we do a complete read from the kernel buffer before
 *		writing, we need to rewind it as well. The amount to rewind
 *		equals the number of faulty writes in MSETL,
 *		which is: [4 - (LSM_STEP-1)]*8
 *		LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *		read and stored in D0Ar2.
 *
 *	NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 *		LSM_STEP will be 0. I.e. we do 4 writes in our case: if
 *		a fault happens at the 4th write, LSM_STEP will be 0
 *		instead of 4. The code copes with that.
 *
 *	n is updated by the number of successful writes, which is:
 *	n = n - (LSM_STEP-1)*8
 */
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,	\
		"LSR	D0Ar2, D0Ar2, #8\n"			\
		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
		"SUB	D0Ar2, D0Ar2, #1\n"			\
		"MOV	D1Ar1, #4\n"				\
		"SUB	D0Ar2, D1Ar1, D0Ar2\n"			\
		"LSL	D0Ar2, D0Ar2, #3\n"			\
		"LSL	D1Ar1, D1Ar1, #3\n"			\
		"SUB	D1Ar1, D1Ar1, D0Ar2\n"			\
		"SUB	%0, %0, #8\n"				\
		"SUB	%1, %1, D0Ar2\n"			\
		"SUB	%3, %3, D1Ar1\n")
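/*
 * Worked example of the fixup above (editor's sketch): suppose the
 * second write of an MSETL quad faults, so LSM_STEP == 2 when the
 * fixup runs. Then:
 *
 *	D0Ar2 = LSM_STEP - 1    = 1	completed writes
 *	D0Ar2 = (4 - 1) * 8     = 24	bytes left unwritten by MSETL
 *	D1Ar1 = 4*8 - 24        = 8	bytes successfully written
 *
 *	to   -= 8	constant rewind past the faulting write
 *	from -= 24	back to the start of the unwritten data
 *	n    -= 8	only the successful bytes are accounted
 *
 * The 32 bit variant further down is identical with a unit of 4 bytes
 * instead of 8.
 */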
/*
 *	optimized copying loop using RAPF when 32 bit aligned
 *
 *	n will be automatically decremented inside the loop.
 *	ret will be left intact: if an error occurs we rewind
 *	so that the original non-optimized code fills in this
 *	value correctly.
 *
 *	on fault:
 *		> n will hold the total number of uncopied bytes
 *
 *		> {'to','from'} will be rewound so that
 *			the non-optimized code will do the proper fix up
 *
 *	DCACHE drops the cacheline, which helps in reducing cache
 *	pollution.
 *
 *	We introduce an extra SETD at the end of the loop to
 *	ensure we don't fall off the loop before we catch all
 *	errors.
 *
 *	NOTICE:
 *		LSM_STEP in TXSTATUS must be cleared in the fix up code.
 *		Since we're using M{S,G}ETD, a fault might happen at
 *		any address in the middle of M{S,G}ETD, causing
 *		the value of LSM_STEP to be incorrect, which can
 *		cause subsequent use of M{S,G}ET{L,D} to go wrong:
 *		i.e. if LSM_STEP was 1 when a fault occurred, the
 *		next call to M{S,G}ET{L,D} will skip the first
 *		copy/get as it thinks the first one has already
 *		been done.
 */
#define __asm_copy_user_32bit_rapf_loop(				\
		to, from, ret, n, id, FIXUP)				\
	asm volatile (							\
		".balign 8\n"						\
		"	MOV	RAPF, %1\n"				\
		"	MSETL	[A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
		"	MOV	D0Ar6, #0\n"				\
		"	LSR	D1Ar5, %3, #6\n"			\
		"	SUB	TXRPT, D1Ar5, #2\n"			\
		"	MOV	RAPF, %1\n"				\
		"$Lloop"id":\n"						\
		"	ADD	RAPF, %1, #64\n"			\
		"21:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"22:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"23:	SUB	%3, %3, #16\n"				\
		"24:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"25:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"26:	SUB	%3, %3, #16\n"				\
		"27:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"28:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"29:	SUB	%3, %3, #16\n"				\
		"30:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"31:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"32:	SUB	%3, %3, #16\n"				\
		"	DCACHE	[%1+#-64], D0Ar6\n"			\
		"	BR	$Lloop"id"\n"				\
									\
		"	MOV	RAPF, %1\n"				\
		"33:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"34:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"35:	SUB	%3, %3, #16\n"				\
		"36:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"37:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"38:	SUB	%3, %3, #16\n"				\
		"39:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"40:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"41:	SUB	%3, %3, #16\n"				\
		"42:	MGETD	D0FrT, D0.5, D0.6, D0.7, [%1++]\n"	\
		"43:	MSETD	[%0++], D0FrT, D0.5, D0.6, D0.7\n"	\
		"44:	SETD	[%0+#-4], D0.7\n"			\
		"	SUB	%3, %3, #16\n"				\
		"1:	DCACHE	[%1+#-64], D0Ar6\n"			\
		"	GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"		\
		"	GETL    D0FrT, D1RtP, [A0StP+#-32]\n"		\
		"	GETL    D0.5, D1.5, [A0StP+#-24]\n"		\
		"	GETL    D0.6, D1.6, [A0StP+#-16]\n"		\
		"	GETL    D0.7, D1.7, [A0StP+#-8]\n"		\
		"	SUB A0StP, A0StP, #40\n"			\
		"	.section .fixup,\"ax\"\n"			\
		"3:	MOV	D0Ar2, TXSTATUS\n"			\
		"	MOV	D1Ar1, TXSTATUS\n"			\
		"	AND	D1Ar1, D1Ar1, #0xFFFFF8FF\n"		\
		"	MOV	TXSTATUS, D1Ar1\n"			\
			FIXUP						\
		"	MOVT    D0Ar2, #HI(1b)\n"			\
		"	JUMP    D0Ar2, #LO(1b)\n"			\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.long 21b,3b\n"					\
		"	.long 22b,3b\n"					\
		"	.long 23b,3b\n"					\
		"	.long 24b,3b\n"					\
		"	.long 25b,3b\n"					\
		"	.long 26b,3b\n"					\
		"	.long 27b,3b\n"					\
		"	.long 28b,3b\n"					\
		"	.long 29b,3b\n"					\
		"	.long 30b,3b\n"					\
		"	.long 31b,3b\n"					\
		"	.long 32b,3b\n"					\
		"	.long 33b,3b\n"					\
		"	.long 34b,3b\n"					\
		"	.long 35b,3b\n"					\
		"	.long 36b,3b\n"					\
		"	.long 37b,3b\n"					\
		"	.long 38b,3b\n"					\
		"	.long 39b,3b\n"					\
		"	.long 40b,3b\n"					\
		"	.long 41b,3b\n"					\
		"	.long 42b,3b\n"					\
		"	.long 43b,3b\n"					\
		"	.long 44b,3b\n"					\
		"	.previous\n"					\
		: "=r" (to), "=r" (from), "=r" (ret), "=d" (n)		\
		: "0" (to), "1" (from), "2" (ret), "3" (n)		\
		: "D1Ar1", "D0Ar2", "cc", "memory")
/*	rewind 'to' and 'from' pointers when a fault occurs
 *
 *	Rationale:
 *		A fault always occurs on writing to the user buffer. A fault
 *		is at a single address, so we need to rewind 'to' by only 4
 *		bytes.
 *		Since we do a complete read from the kernel buffer before
 *		writing, we need to rewind it as well. The amount to rewind
 *		equals the number of faulty writes in MSETD,
 *		which is: [4 - (LSM_STEP-1)]*4
 *		LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *		read and stored in D0Ar2.
 *
 *	NOTE: If a fault occurs at the last operation in M{G,S}ETD,
 *		LSM_STEP will be 0. I.e. we do 4 writes in our case: if
 *		a fault happens at the 4th write, LSM_STEP will be 0
 *		instead of 4. The code copes with that.
 *
 *	n is updated by the number of successful writes, which is:
 *	n = n - (LSM_STEP-1)*4
 */
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,	\
		"LSR	D0Ar2, D0Ar2, #8\n"			\
		"ANDS	D0Ar2, D0Ar2, #0x7\n"			\
		"ADDZ	D0Ar2, D0Ar2, #4\n"			\
		"SUB	D0Ar2, D0Ar2, #1\n"			\
		"MOV	D1Ar1, #4\n"				\
		"SUB	D0Ar2, D1Ar1, D0Ar2\n"			\
		"LSL	D0Ar2, D0Ar2, #2\n"			\
		"LSL	D1Ar1, D1Ar1, #2\n"			\
		"SUB	D1Ar1, D1Ar1, D0Ar2\n"			\
		"SUB	%0, %0, #4\n"				\
		"SUB	%1, %1, D0Ar2\n"			\
		"SUB	%3, %3, D1Ar1\n")
unsigned long raw_copy_to_user(void __user *pdst, const void *psrc,
			       unsigned long n)
{
	register char __user *dst asm ("A0.2") = pdst;
	register const char *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_to_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_to_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_to_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_to_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* copy user using 64 bit rapf copy */
			__asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
							   n, "64cu");
		}
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
	if (n >= RAPF_MIN_BUF_SIZE) {
		/* copy user using 32 bit rapf copy */
		__asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 16) {
		__asm_copy_to_user_16(dst, src, retn);
		n -= 16;
		if (retn)
			return retn + n;
	}

	while (n >= 4) {
		__asm_copy_to_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			return retn + n;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_copy_to_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_to_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_to_user_3(dst, src, retn);
		break;
	}

	/*
	 * If we get here, retn correctly reflects the number of failing
	 * bytes.
	 */
	return retn;
}
EXPORT_SYMBOL(raw_copy_to_user);
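/*
 * Typical use (editor's sketch, not part of the original file): callers
 * do not invoke raw_copy_to_user() directly; they go through
 * copy_to_user(), which validates the range with access_ok() first and,
 * like this routine, returns the number of bytes that could NOT be
 * copied:
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */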
#define __asm_copy_from_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		"	GETB D1Ar1,[%1++]\n" \
		"2:	SETB [%0++],D1Ar1\n", \
		"3:	ADD  %2,%2,#1\n", \
		"	.long 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		"	GETW D1Ar1,[%1++]\n" \
		"2:	SETW [%0++],D1Ar1\n" COPY, \
		"3:	ADD  %2,%2,#2\n" FIXUP, \
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, \
		"	GETB D1Ar1,[%1++]\n" \
		"4:	SETB [%0++],D1Ar1\n", \
		"5:	ADD  %2,%2,#1\n", \
		"	.long 4b,5b\n")

#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		"	GETD D1Ar1,[%1++]\n" \
		"2:	SETD [%0++],D1Ar1\n" COPY, \
		"3:	ADD  %2,%2,#4\n" FIXUP, \
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_8x64(to, from, ret) \
	asm volatile ( \
		"	GETL D0Ar2,D1Ar1,[%1++]\n" \
		"2:	SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		"	.section .fixup,\"ax\"\n" \
		"3:	ADD  %2,%2,#8\n" \
		"	MOVT    D0Ar2,#HI(1b)\n" \
		"	JUMP    D0Ar2,#LO(1b)\n" \
		"	.previous\n" \
		"	.section __ex_table,\"a\"\n" \
		"	.long 2b,3b\n" \
		"	.previous\n" \
		: "=a" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
		: "D1Ar1", "D0Ar2", "memory")
/*	rewind 'from' pointer when a fault occurs
 *
 *	Rationale:
 *		A fault occurs while reading from the user buffer, which is
 *		the source.
 *		Since we don't write to the kernel buffer until we have read
 *		first, the kernel buffer is at the right state and needn't be
 *		corrected, but the source must be rewound to the beginning of
 *		the block, which is LSM_STEP*8 bytes.
 *		LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *		read and stored in D0Ar2.
 *
 *	NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 *		LSM_STEP will be 0. I.e. we do 4 reads in our case: if
 *		a fault happens at the 4th read, LSM_STEP will be 0
 *		instead of 4. The code copes with that.
 */
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)	\
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,		\
		"LSR	D0Ar2, D0Ar2, #5\n"				\
		"ANDS	D0Ar2, D0Ar2, #0x38\n"				\
		"ADDZ	D0Ar2, D0Ar2, #32\n"				\
		"SUB	%1, %1, D0Ar2\n")

/*	rewind 'from' pointer when a fault occurs
 *
 *	Rationale:
 *		A fault occurs while reading from the user buffer, which is
 *		the source.
 *		Since we don't write to the kernel buffer until we have read
 *		first, the kernel buffer is at the right state and needn't be
 *		corrected, but the source must be rewound to the beginning of
 *		the block, which is LSM_STEP*4 bytes.
 *		LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *		read and stored in D0Ar2.
 *
 *	NOTE: If a fault occurs at the last operation in M{G,S}ETD,
 *		LSM_STEP will be 0. I.e. we do 4 reads in our case: if
 *		a fault happens at the 4th read, LSM_STEP will be 0
 *		instead of 4. The code copes with that.
 */
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)	\
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,		\
		"LSR	D0Ar2, D0Ar2, #6\n"				\
		"ANDS	D0Ar2, D0Ar2, #0x1c\n"				\
		"ADDZ	D0Ar2, D0Ar2, #16\n"				\
		"SUB	%1, %1, D0Ar2\n")
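/*
 * Worked example of the source rewind above (editor's sketch): if an
 * MGETL faults at its 3rd load, LSM_STEP == 3 and D0Ar2 ends up as
 * 3*8 = 24, so 'from' is moved back 24 bytes to the start of the MGETL
 * block (32 in the LSM_STEP == 0 wrap-around case). The matching MSETD
 * for this block has not run yet, so the destination needs no
 * correction and the non-optimized tail simply re-does the copy.
 */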
/*
 * Copy from user to kernel. The return-value is the number of bytes that were
 * inaccessible.
 */
unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
				 unsigned long n)
{
	register char *dst asm ("A0.2") = pdst;
	register const char __user *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_from_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_from_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_from_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_from_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* Copy using fast 64bit rapf */
			__asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
							     n, "64cuz");
		}
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}

	if (n >= RAPF_MIN_BUF_SIZE) {
		/* Copy using fast 32bit rapf */
		__asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
						     n, "32cuz");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 4) {
		__asm_copy_from_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			return retn + n;
	}

	/* If we get here, there were no memory read faults. */
	switch (n) {
		/* These copies are at least "naturally aligned" (so we don't
		   have to check each byte), due to the src alignment code.
		   The *_3 case *will* get the correct count for retn. */
	case 0:
		/* This case deliberately left in (if you have doubts check the
		   generated assembly code). */
		break;
	case 1:
		__asm_copy_from_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_from_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_from_user_3(dst, src, retn);
		break;
	}

	/* If we get here, retn correctly reflects the number of failing
	   bytes. */
	return retn;
}
EXPORT_SYMBOL(raw_copy_from_user);
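/*
 * Typical use (editor's sketch, not part of the original file): via
 * copy_from_user(), which checks the range with access_ok() and returns
 * the number of bytes left uncopied (struct foo is illustrative):
 *
 *	struct foo cfg;
 *	if (copy_from_user(&cfg, ubuf, sizeof(cfg)))
 *		return -EFAULT;
 */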
#define __asm_clear_8x64(to, ret) \
	asm volatile ( \
		"	MOV  D0Ar2,#0\n" \
		"	MOV  D1Ar1,#0\n" \
		"	SETL [%0],D0Ar2,D1Ar1\n" \
		"2:	SETL [%0++],D0Ar2,D1Ar1\n" \
		"1:\n" \
		"	.section .fixup,\"ax\"\n" \
		"3:	ADD  %1,%1,#8\n" \
		"	MOVT    D0Ar2,#HI(1b)\n" \
		"	JUMP    D0Ar2,#LO(1b)\n" \
		"	.previous\n" \
		"	.section __ex_table,\"a\"\n" \
		"	.long 2b,3b\n" \
		"	.previous\n" \
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret) \
		: "D1Ar1", "D0Ar2", "memory")

/* Zero userspace. */

#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	asm volatile ( \
		CLEAR \
		"1:\n" \
		"	.section .fixup,\"ax\"\n" \
		FIXUP \
		"	MOVT    D1Ar1,#HI(1b)\n" \
		"	JUMP    D1Ar1,#LO(1b)\n" \
		"	.previous\n" \
		"	.section __ex_table,\"a\"\n" \
		TENTRY \
		"	.previous\n" \
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret) \
		: "D1Ar1", "memory")
#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret, \
		"	SETB [%0],D1Ar1\n" \
		"2:	SETB [%0++],D1Ar1\n", \
		"3:	ADD  %1,%1,#1\n", \
		"	.long 2b,3b\n")

#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret, \
		"	SETW [%0],D1Ar1\n" \
		"2:	SETW [%0++],D1Ar1\n", \
		"3:	ADD  %1,%1,#2\n", \
		"	.long 2b,3b\n")

#define __asm_clear_3(to, ret) \
	__asm_clear(to, ret, \
		"2:	SETW [%0++],D1Ar1\n" \
		"	SETB [%0],D1Ar1\n" \
		"3:	SETB [%0++],D1Ar1\n", \
		"4:	ADD  %1,%1,#2\n" \
		"5:	ADD  %1,%1,#1\n", \
		"	.long 2b,4b\n" \
		"	.long 3b,5b\n")

#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear(to, ret, \
		"	SETD [%0],D1Ar1\n" \
		"2:	SETD [%0++],D1Ar1\n" CLEAR, \
		"3:	ADD  %1,%1,#4\n" FIXUP, \
		"	.long 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
	__asm_clear_4x_cont(to, ret, "", "", "")
#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_4x_cont(to, ret, \
		"	SETD [%0],D1Ar1\n" \
		"4:	SETD [%0++],D1Ar1\n" CLEAR, \
		"5:	ADD  %1,%1,#4\n" FIXUP, \
		"	.long 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
	__asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_8x_cont(to, ret, \
		"	SETD [%0],D1Ar1\n" \
		"6:	SETD [%0++],D1Ar1\n" CLEAR, \
		"7:	ADD  %1,%1,#4\n" FIXUP, \
		"	.long 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
	__asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_12x_cont(to, ret, \
		"	SETD [%0],D1Ar1\n" \
		"8:	SETD [%0++],D1Ar1\n" CLEAR, \
		"9:	ADD  %1,%1,#4\n" FIXUP, \
		"	.long 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
	__asm_clear_16x_cont(to, ret, "", "", "")
unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
	register char __user *dst asm ("D0Re0") = pto;
	register unsigned long n asm ("D1Re0") = pn;
	register unsigned long retn asm ("D0Ar6") = 0;

	if ((unsigned long) dst & 1) {
		__asm_clear_1(dst, retn);
		n--;
	}

	if ((unsigned long) dst & 2) {
		__asm_clear_2(dst, retn);
		n -= 2;
	}

	/* 64 bit copy loop */
	if (!((__force unsigned long) dst & 7)) {
		while (n >= 8) {
			__asm_clear_8x64(dst, retn);
			n -= 8;
		}
	}

	while (n >= 16) {
		__asm_clear_16(dst, retn);
		n -= 16;
	}

	while (n >= 4) {
		__asm_clear_4(dst, retn);
		n -= 4;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_clear_1(dst, retn);
		break;
	case 2:
		__asm_clear_2(dst, retn);
		break;
	case 3:
		__asm_clear_3(dst, retn);
		break;
	}

	return retn;
}
EXPORT_SYMBOL(__do_clear_user);
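/*
 * Typical use (editor's sketch, not part of the original file):
 * __do_clear_user() backs clear_user(), which zeroes a userspace range
 * after an access_ok() check and returns the number of bytes that could
 * not be cleared:
 *
 *	if (clear_user(ubuf + done, len - done))
 *		return -EFAULT;
 */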
unsigned char __get_user_asm_b(const void __user *addr, long *err)
{
	register unsigned char x asm ("D0Re0") = 0;
	asm volatile (
		"	GETB %0,[%2]\n"
		"1:\n"
		"	GETB %0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV     D0FrT,%3\n"
		"	SETD    [%1],D0FrT\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_b);

unsigned short __get_user_asm_w(const void __user *addr, long *err)
{
	register unsigned short x asm ("D0Re0") = 0;
	asm volatile (
		"	GETW %0,[%2]\n"
		"1:\n"
		"	GETW %0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV     D0FrT,%3\n"
		"	SETD    [%1],D0FrT\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_w);

unsigned int __get_user_asm_d(const void __user *addr, long *err)
{
	register unsigned int x asm ("D0Re0") = 0;
	asm volatile (
		"	GETD %0,[%2]\n"
		"1:\n"
		"	GETD %0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV     D0FrT,%3\n"
		"	SETD    [%1],D0FrT\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_d);

unsigned long long __get_user_asm_l(const void __user *addr, long *err)
{
	register unsigned long long x asm ("D0Re0") = 0;
	asm volatile (
		"	GETL %0,%t0,[%2]\n"
		"1:\n"
		"	GETL %0,%t0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV     D0FrT,%3\n"
		"	SETD    [%1],D0FrT\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_l);
long __put_user_asm_b(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		"	SETB [%2],%1\n"
		"1:\n"
		"	SETB [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV  %0,%3\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r" (err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_b);

long __put_user_asm_w(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		"	SETW [%2],%1\n"
		"1:\n"
		"	SETW [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV  %0,%3\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r" (err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_w);

long __put_user_asm_d(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		"	SETD [%2],%1\n"
		"1:\n"
		"	SETD [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV  %0,%3\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r" (err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_d);

long __put_user_asm_l(unsigned long long x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		"	SETL [%2],%1,%t1\n"
		"1:\n"
		"	SETL [%2],%1,%t1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV  %0,%3\n"
		"	MOVT    D0FrT,#HI(2b)\n"
		"	JUMP    D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r" (err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_l);
long strnlen_user(const char __user *src, long count)
{
	long res;

	if (!access_ok(VERIFY_READ, src, 0))
		return 0;

	asm volatile (" MOV     D0Ar4, %1\n"
		"	MOV     D0Ar6, %2\n"
		"0:\n"
		"	SUBS    D0FrT, D0Ar6, #0\n"
		"	SUB     D0Ar6, D0Ar6, #1\n"
		"	BLE     2f\n"
		"	GETB    D0FrT, [D0Ar4+#1++]\n"
		"1:\n"
		"	TST     D0FrT, #255\n"
		"	BNZ     0b\n"
		"2:\n"
		"	SUB     %0, %2, D0Ar6\n"
		"3:\n"
		"	.section .fixup,\"ax\"\n"
		"4:\n"
		"	MOV     %0, #0\n"
		"	MOVT    D0FrT,#HI(3b)\n"
		"	JUMP    D0FrT,#LO(3b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,4b\n"
		"	.previous\n"
		: "=r" (res)
		: "r" (src), "r" (count)
		: "D0FrT", "D0Ar4", "D0Ar6", "cc");

	return res;
}
EXPORT_SYMBOL(strnlen_user);
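/*
 * Semantics note (editor's): the returned length includes the
 * terminating NUL; 0 is returned for an inaccessible address, and a
 * value greater than count if no NUL was found within count bytes:
 *
 *	long len = strnlen_user(ustr, sizeof(buf));
 *	if (len == 0 || len > sizeof(buf))
 *		return -EFAULT;
 */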
long __strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;

	if (count == 0)
		return 0;

	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
	 *  So do we.
	 *
	 *  This code is deduced from:
	 *
	 *	char tmp2;
	 *	long tmp1;
	 *	tmp1 = count;
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	       && --tmp1)
	 *	  ;
	 *
	 *	res = count - tmp1;
	 *
	 *  with tweaks.
	 */

	asm volatile (" MOV  %0,%3\n"
		"1:\n"
		"	GETB D0FrT,[%2++]\n"
		"2:\n"
		"	CMP  D0FrT,#0\n"
		"	SETB [%1++],D0FrT\n"
		"	BEQ  3f\n"
		"	SUBS %0,%0,#1\n"
		"	BNZ  1b\n"
		"3:\n"
		"	SUB  %0,%3,%0\n"
		"4:\n"
		"	.section .fixup,\"ax\"\n"
		"5:\n"
		"	MOV  %0,%7\n"
		"	MOVT    D0FrT,#HI(4b)\n"
		"	JUMP    D0FrT,#LO(4b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 2b,5b\n"
		"	.previous"
		: "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
		: "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
		: "D0FrT", "memory", "cc");

	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);
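/*
 * Semantics note (editor's, based on the loop above): on success the
 * return value is the length of the copied string, not counting the
 * trailing NUL; it equals count if no NUL was found within count bytes
 * (the destination is then not NUL-terminated); -EFAULT is returned on
 * an unhandled read fault:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uptr, sizeof(name));
 *	if (len < 0)
 *		return len;
 */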