/*
 * User address space access functions.
 * The non-inlined parts of asm-cris/uaccess.h are here.
 *
 * Copyright (C) 2000, Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 */
#include <linux/uaccess.h>
/* Asm:s have been tweaked (within the domain of correctness) to give
   satisfactory results for "gcc version 2.96 20000427 (experimental)".

   Note that the PC saved at a bus-fault is the address *after* the
   faulting instruction, which means the branch target for instructions
   in delay-slots of taken branches.  Note also that the postincrement in
   the instruction is performed regardless of bus-fault; the register is
   seen updated in fault handlers.

   Oh, and on the code formatting issue, to whoever feels like "fixing
   it" to Conformity: I'm too "lazy", but why don't you go ahead and "fix"
   string.c too.  I just don't think too many people will hack this file
   for the code format to be an issue.  */
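
/* To make the delay-slot rule above concrete (this is the loop shape used
   in __copy_user below):

	0:	movem	[$r11+],$r10
		subq	44,$r12
		bge	0b
		movem	$r10,[$r13+]	; in the delay slot; may fault
	1:

   A fault in the delay-slot movem is noted at 0b when the branch was
   taken and at 1b when it was not, so the one faulting instruction needs
   two __ex_table entries.  */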

/* Copy to userspace.  This is based on the memcpy used for
   kernel-to-kernel copying; see "string.c".  */

unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
{
  /* We want the parameters put in special registers.
     Make sure the compiler is able to make something useful of this.
     As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).

     FIXME: Comment for old gcc version.  Check.
     If gcc was alright, it really would need no temporaries, and no
     stack space to save stuff on. */
  register char *dst __asm__ ("r13") = pdst;
  register const char *src __asm__ ("r11") = psrc;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;

  /* When src is aligned but not dst, this makes a few extra needless
     cycles.  I believe it would take as many to check that the
     re-alignment was unnecessary.  */
  if (((unsigned long) dst & 3) != 0
      /* Don't align if we wouldn't copy more than a few bytes; so we
	 don't have to check further for overflows.  */
      && n >= 3)
  {
    if ((unsigned long) dst & 1)
    {
      __asm_copy_to_user_1 (dst, src, retn);
      n--;
    }

    if ((unsigned long) dst & 2)
    {
      __asm_copy_to_user_2 (dst, src, retn);
      n -= 2;
    }
  }

  /* Decide which copying method to use. */
  if (n >= 44*2)		/* Break even between movem and
				   move16 is at 38.7*2, but modulo 44. */
  {
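    /* The 44 comes from the movem loop below: movem with $r10 as the
       highest register moves r0..r10, i.e. 11 registers x 4 bytes = 44
       bytes per iteration.  */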
    /* For large copies we use 'movem'.  */

    /* It is not optimal to tell the compiler about clobbering any
       registers; that will move the saving/restoring of those registers
       to the function prologue/epilogue, and make non-movem sizes
       suboptimal.

       This method is not foolproof; it assumes that the "asm reg"
       declarations at the beginning of the function really are used
       here (beware: they may be moved to temporary registers).
       This way, we do not have to save/move the registers around into
       temporaries; we can safely use them straight away.

       If you want to check that the allocation was right, then
       check the equalities in the first comment.  It should say
       "r13=r13, r11=r11, r12=r12".  */
    __asm__ volatile ("\n\
	.ifnc %0%1%2%3,$r13$r11$r12$r10				\n\
	.err							\n\
	.endif							\n\
								\n\
	;; Save the registers we'll use in the movem process	\n\
	;; on the stack.					\n\
	subq	11*4,$sp					\n\
	movem	$r10,[$sp]					\n\
								\n\
	;; Now we've got this:					\n\
	;; r11 - src						\n\
	;; r13 - dst						\n\
	;; r12 - n						\n\
								\n\
	;; Update n for the first loop				\n\
	subq	44,$r12						\n\
								\n\
; Since the noted PC of a faulting instruction in a delay-slot of a taken \n\
; branch is that of the branch target, we actually point at the from-movem \n\
; for this case.  There is no ambiguity here; if there was a fault in that \n\
; instruction (meaning a kernel oops), the faulted PC would be the address \n\
; after *that* movem.						\n\
								\n\
0:								\n\
	movem	[$r11+],$r10					\n\
	subq	44,$r12						\n\
	bge	0b						\n\
	movem	$r10,[$r13+]					\n\
1:								\n\
	addq	44,$r12 ;; compensate for last loop underflowing n \n\
								\n\
	;; Restore registers from stack				\n\
	movem	[$sp+],$r10					\n\
2:								\n\
	.section .fixup,\"ax\"					\n\
								\n\
; To provide a correct count in r10 of bytes that failed to be copied,	\n\
; we jump back into the loop if the loop-branch was taken.  There is no \n\
; performance penalty for sane use; the program will segfault soon enough.\n\
								\n\
3:								\n\
	move.d	[$sp],$r10					\n\
	addq	44,$r10						\n\
	move.d	$r10,[$sp]					\n\
	jump	0b						\n\
4:								\n\
	movem	[$sp+],$r10					\n\
	addq	44,$r10						\n\
	addq	44,$r12						\n\
	jump	2b						\n\
								\n\
	.previous						\n\
	.section __ex_table,\"a\"				\n\
	.dword	0b,3b						\n\
	.dword	1b,4b						\n\
	.previous"

     /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
     /* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn));
  }
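
  /* A note on the constraint strings above: the "0".."3" input
     constraints tie each input to the register chosen for the matching
     output, so dst/src/n/retn must each stay in one register across the
     asm, and the .ifnc/.err check turns a wrong allocation into a build
     error instead of silent corruption.  */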

  /* Either we directly start copying, using dword copying in a loop, or
     we copy as much as possible with 'movem' and then the last block (<44
     bytes) is copied here.  This will work since 'movem' will have
     updated SRC, DST and N.  */

  while (n >= 16)
  {
    __asm_copy_to_user_16 (dst, src, retn);
    n -= 16;
  }

  /* Having a separate by-four loop cuts down on cache footprint.
     FIXME: Test with and without; increasing switch to be 0..15.  */
  while (n >= 4)
  {
    __asm_copy_to_user_4 (dst, src, retn);
    n -= 4;
  }

  switch (n)
  {
    case 0:
      break;
    case 1:
      __asm_copy_to_user_1 (dst, src, retn);
      break;
    case 2:
      __asm_copy_to_user_2 (dst, src, retn);
      break;
    case 3:
      __asm_copy_to_user_3 (dst, src, retn);
      break;
  }

  return retn;
}
EXPORT_SYMBOL(__copy_user);
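
/* A sketch of the calling convention, with illustrative caller names that
   are not part of this file: the generic copy_to_user() path ends up doing

	unsigned long left = __copy_user(udst, ksrc, len);

   and treats 'left' as the number of trailing bytes that could not be
   written (zero on full success), typically turning nonzero into -EFAULT.  */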

/* Copy from user to kernel.  The return-value is the number of bytes
   that were inaccessible.  */
unsigned long __copy_user_in(void *pdst, const void __user *psrc,
			     unsigned long pn)
{
  /* We want the parameters put in special registers.
     Make sure the compiler is able to make something useful of this.
     As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).

     FIXME: Comment for old gcc version.  Check.
     If gcc was alright, it really would need no temporaries, and no
     stack space to save stuff on. */
  register char *dst __asm__ ("r13") = pdst;
  register const char *src __asm__ ("r11") = psrc;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;

  /* The best reason to align src is that we then know that a read-fault
     was for aligned bytes; there's no 1..3 remaining good bytes to
     pickle.  */
  if (((unsigned long) src & 3) != 0)
  {
    if (((unsigned long) src & 1) && n != 0)
    {
      __asm_copy_from_user_1 (dst, src, retn);
      n--;
      if (retn != 0)
        goto exception_exit;
    }

    if (((unsigned long) src & 2) && n >= 2)
    {
      __asm_copy_from_user_2 (dst, src, retn);
      n -= 2;
      if (retn != 0)
        goto exception_exit;
    }
  }

  /* Decide which copying method to use. */
  if (n >= 44*2)		/* Break even between movem and
				   move16 is at 38.7*2, but modulo 44.
				   FIXME: We use move4 now. */
  {
    /* For large copies we use 'movem'.  */

    /* It is not optimal to tell the compiler about clobbering any
       registers; that will move the saving/restoring of those registers
       to the function prologue/epilogue, and make non-movem sizes
       suboptimal.

       This method is not foolproof; it assumes that the "asm reg"
       declarations at the beginning of the function really are used
       here (beware: they may be moved to temporary registers).
       This way, we do not have to save/move the registers around into
       temporaries; we can safely use them straight away.

       If you want to check that the allocation was right, then
       check the equalities in the first comment.  It should say
       "r13=r13, r11=r11, r12=r12".  */
    __asm__ volatile ("\n\
	.ifnc %0%1%2%3,$r13$r11$r12$r10				\n\
	.err							\n\
	.endif							\n\
								\n\
	;; Save the registers we'll use in the movem process	\n\
	;; on the stack.					\n\
	subq	11*4,$sp					\n\
	movem	$r10,[$sp]					\n\
								\n\
	;; Now we've got this:					\n\
	;; r11 - src						\n\
	;; r13 - dst						\n\
	;; r12 - n						\n\
								\n\
	;; Update n for the first loop				\n\
	subq	44,$r12						\n\
0:								\n\
	movem	[$r11+],$r10					\n\
1:								\n\
	subq	44,$r12						\n\
	bge	0b						\n\
	movem	$r10,[$r13+]					\n\
								\n\
	addq	44,$r12 ;; compensate for last loop underflowing n \n\
								\n\
	;; Restore registers from stack				\n\
	movem	[$sp+],$r10					\n\
4:								\n\
	.section .fixup,\"ax\"					\n\
								\n\
;; Do not jump back into the loop if we fail.  For some uses, we get a	\n\
;; page fault somewhere on the line.  Without checking for page limits, \n\
;; we don't know where, but we need to copy accurately and keep an	\n\
;; accurate count; not just clear the whole line.  To do that, we fall	\n\
;; down in the code below, proceeding with smaller amounts.  It should	\n\
;; be kept in mind that we have to cater to code like what at one time	\n\
;; was in fs/super.c:						\n\
;;  i = size - copy_from_user((void *)page, data, size);	\n\
;; which would cause repeated faults while clearing the remainder of	\n\
;; the SIZE bytes at PAGE after the first fault.		\n\
;; A caveat here is that we must not fall through from a failing page	\n\
;; to a valid page.						\n\
								\n\
3:								\n\
	movem	[$sp+],$r10					\n\
	addq	44,$r12 ;; Get back count before faulting point. \n\
	subq	44,$r11 ;; Get back pointer to faulting movem-line. \n\
	jump	4b	;; Fall through, pretending the fault didn't happen.\n\
								\n\
	.previous						\n\
	.section __ex_table,\"a\"				\n\
	.dword	1b,3b						\n\
	.previous"

     /* Outputs */ : "=r" (dst), "=r" (src), "=r" (n), "=r" (retn)
     /* Inputs */ : "0" (dst), "1" (src), "2" (n), "3" (retn));
  }

  /* Either we directly start copying here, using dword copying in a loop,
     or we copy as much as possible with 'movem' and then the last block
     (<44 bytes) is copied here.  This will work since 'movem' will have
     updated src, dst and n.  (Except with failing src.)

     Since we want to keep src accurate, we can't use
     __asm_copy_from_user_N with N != (1, 2, 4); it updates dst and
     retn, but not src (by design; its value is ignored elsewhere).  */

  while (n >= 4)
  {
    __asm_copy_from_user_4 (dst, src, retn);
    n -= 4;

    if (retn != 0)
      goto exception_exit;
  }

  /* If we get here, there were no memory read faults.  */
  switch (n)
  {
    /* These copies are at least "naturally aligned" (so we don't have
       to check each byte), due to the src alignment code before the
       movem loop.  The *_3 case *will* get the correct count for retn.  */
    case 0:
      /* This case deliberately left in (if you have doubts check the
	 generated assembly code).  */
      break;
    case 1:
      __asm_copy_from_user_1 (dst, src, retn);
      break;
    case 2:
      __asm_copy_from_user_2 (dst, src, retn);
      break;
    case 3:
      __asm_copy_from_user_3 (dst, src, retn);
      break;
  }

  /* If we get here, retn correctly reflects the number of failing
     bytes.  */
  return retn;

exception_exit:
  return retn + n;
}
EXPORT_SYMBOL(__copy_user_in);
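
/* The fixup strategy above exists for callers of the kind quoted in the
   asm comment; as a sketch (the old fs/super.c idiom, not current code):

	i = size - copy_from_user((void *)page, data, size);

   'i' is then taken as the number of bytes actually copied, so the count
   of failing bytes must be exact rather than rounded up to a whole
   44-byte movem line.  */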

/* Zero userspace.  */
unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
  /* We want the parameters put in special registers.
     Make sure the compiler is able to make something useful of this.
     As it is now: r10 -> r13; r11 -> r11 (nop); r12 -> r12 (nop).

     FIXME: Comment for old gcc version.  Check.
     If gcc was alright, it really would need no temporaries, and no
     stack space to save stuff on. */
  register char *dst __asm__ ("r13") = pto;
  register int n __asm__ ("r12") = pn;
  register int retn __asm__ ("r10") = 0;

  if (((unsigned long) dst & 3) != 0
      /* Don't align if we wouldn't copy more than a few bytes.  */
      && n >= 3)
  {
    if ((unsigned long) dst & 1)
    {
      __asm_clear_1 (dst, retn);
      n--;
    }

    if ((unsigned long) dst & 2)
    {
      __asm_clear_2 (dst, retn);
      n -= 2;
    }
  }

  /* Decide which copying method to use.
     FIXME: This number is from the "ordinary" kernel memset.  */
  if (n >= 48)
  {
    /* For large clears we use 'movem'.  */
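
    /* The 48 matches the movem loop below: it clears and stores the
       registers r0..r11, i.e. 12 registers x 4 bytes = 48 bytes of
       zeroes per iteration (hence the 12*4 adjustments in the asm).  */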

    /* It is not optimal to tell the compiler about clobbering any
       call-saved registers; that will move the saving/restoring of
       those registers to the function prologue/epilogue, and make
       non-movem sizes suboptimal.

       This method is not foolproof; it assumes that the "asm reg"
       declarations at the beginning of the function really are used
       here (beware: they may be moved to temporary registers).
       This way, we do not have to save/move the registers around into
       temporaries; we can safely use them straight away.

       If you want to check that the allocation was right, then
       check the equalities in the first comment.  It should say
       something like "r13=r13, r11=r11, r12=r12".  */
    __asm__ volatile ("\n\
	.ifnc %0%1%2,$r13$r12$r10				\n\
	.err							\n\
	.endif							\n\
								\n\
	;; Save the registers we'll clobber in the movem process \n\
	;; on the stack.  Don't mention them to gcc, it will only be \n\
	;; upset.						\n\
	subq	11*4,$sp					\n\
	movem	$r10,[$sp]					\n\
								\n\
	clear.d	$r0						\n\
	clear.d	$r1						\n\
	clear.d	$r2						\n\
	clear.d	$r3						\n\
	clear.d	$r4						\n\
	clear.d	$r5						\n\
	clear.d	$r6						\n\
	clear.d	$r7						\n\
	clear.d	$r8						\n\
	clear.d	$r9						\n\
	clear.d	$r10						\n\
	clear.d	$r11						\n\
								\n\
	;; Now we've got this:					\n\
	;; r13 - dst						\n\
	;; r12 - n						\n\
								\n\
	;; Update n for the first loop				\n\
	subq	12*4,$r12					\n\
0:								\n\
	subq	12*4,$r12					\n\
	bge	0b						\n\
	movem	$r11,[$r13+]					\n\
1:								\n\
	addq	12*4,$r12 ;; compensate for last loop underflowing n\n\
								\n\
	;; Restore registers from stack				\n\
	movem	[$sp+],$r10					\n\
2:								\n\
	.section .fixup,\"ax\"					\n\
3:								\n\
	move.d	[$sp],$r10					\n\
	addq	12*4,$r10					\n\
	move.d	$r10,[$sp]					\n\
	clear.d	$r10	;; r10 is a zero-source again		\n\
	jump	0b						\n\
4:								\n\
	movem	[$sp+],$r10					\n\
	addq	12*4,$r10					\n\
	addq	12*4,$r12					\n\
	jump	2b						\n\
								\n\
	.previous						\n\
	.section __ex_table,\"a\"				\n\
	.dword	0b,3b						\n\
	.dword	1b,4b						\n\
	.previous"

     /* Outputs */ : "=r" (dst), "=r" (n), "=r" (retn)
     /* Inputs */ : "0" (dst), "1" (n), "2" (retn)
     /* Clobber */ : "r11");
  }

  while (n >= 16)
  {
    __asm_clear_16 (dst, retn);
    n -= 16;
  }

  /* Having a separate by-four loop cuts down on cache footprint.
     FIXME: Test with and without; increasing switch to be 0..15.  */
  while (n >= 4)
  {
    __asm_clear_4 (dst, retn);
    n -= 4;
  }

  switch (n)
  {
    case 0:
      break;
    case 1:
      __asm_clear_1 (dst, retn);
      break;
    case 2:
      __asm_clear_2 (dst, retn);
      break;
    case 3:
      __asm_clear_3 (dst, retn);
      break;
  }

  return retn;
}
EXPORT_SYMBOL(__do_clear_user);
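
/* Callers' view, as a sketch (the wrapper name is illustrative of the
   generic kernel entry point, not defined in this file):

	unsigned long left = __do_clear_user(uptr, len);

   'left' is the number of bytes that could not be cleared, zero on full
   success; the same return convention as __copy_user above.  */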