Merge tag 'configfs-5.10' of git://git.infradead.org/users/hch/configfs
[sfrench/cifs-2.6.git] / arch / powerpc / include / asm / uaccess.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ARCH_POWERPC_UACCESS_H
3 #define _ARCH_POWERPC_UACCESS_H
4
5 #include <asm/ppc_asm.h>
6 #include <asm/processor.h>
7 #include <asm/page.h>
8 #include <asm/extable.h>
9 #include <asm/kup.h>
10
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

/* Build an mm_segment_t literal carrying the segment's address limit. */
#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

/* Kernel "segment": the limit is ~0UL, so every address passes the check. */
#define KERNEL_DS       MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS         MAKE_MM_SEG(TASK_SIZE - 1)
#endif

/* Current task's active address limit (per-thread, not per-mm). */
#define get_fs()        (current->thread.addr_limit)
33
/*
 * Switch the current task's address limit (e.g. set_fs(KERNEL_DS) around a
 * kernel-pointer uaccess).  Raising TIF_FSCHECK makes the return-to-user
 * path verify the limit was restored, catching a leaked KERNEL_DS.
 */
static inline void set_fs(mm_segment_t fs)
{
        current->thread.addr_limit = fs;
        /* On user-mode return check addr_limit (fs) is correct */
        set_thread_flag(TIF_FSCHECK);
}
40
/* True while KERNEL_DS is active, i.e. address checking is bypassed. */
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
/* Highest address the user accessors may touch under the current limit. */
#define user_addr_max() (get_fs().seg)
43
#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses
 */
/*
 * Both the start address and the size are bounded by the limit separately;
 * the user/kernel gap guarantees addr + size cannot reach kernel space.
 */
#define __access_ok(addr, size, segment)        \
        (((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else
54 static inline int __access_ok(unsigned long addr, unsigned long size,
55                         mm_segment_t seg)
56 {
57         if (addr > seg.seg)
58                 return 0;
59         return (size == 0 || size - 1 <= seg.seg - addr);
60 }
61
#endif

/* Validate a user pointer/length pair against the current address limit. */
#define access_ok(addr, size)           \
        (__chk_user_ptr(addr),          \
         __access_ok((__force unsigned long)(addr), (size), get_fs()))
67
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */
/* Checked variants: run access_ok() on the pointer before the transfer. */
#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

/* Unchecked variants: the caller already validated the range. */
#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
/* asm-goto variant: branches to 'label' on fault instead of returning -EFAULT. */
#define __put_user_goto(x, ptr, label) \
        __put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

/* For use inside a user_access_begin() section (KUAP already open). */
#define __get_user_allowed(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)

/* Atomic-context variants: no might_fault()/might_sleep annotations. */
#define __get_user_inatomic(x, ptr) \
        __get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
        __put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
106
#ifdef CONFIG_PPC64

/*
 * Fetch one (possibly prefixed) instruction from user memory via the
 * accessor 'gu_op'.  Reads the first word; if its primary opcode field
 * (bits 0..5, i.e. __prefix >> 26) is OP_PREFIX, also reads the suffix
 * word and combines the pair with ppc_inst_prefix().  'dest' is only
 * written when every read succeeded; evaluates to 0 or the accessor's
 * error code.
 */
#define ___get_user_instr(gu_op, dest, ptr)                             \
({                                                                      \
        long __gui_ret = 0;                                             \
        unsigned long __gui_ptr = (unsigned long)ptr;                   \
        struct ppc_inst __gui_inst;                                     \
        unsigned int __prefix, __suffix;                                \
        __gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr);  \
        if (__gui_ret == 0) {                                           \
                if ((__prefix >> 26) == OP_PREFIX) {                    \
                        __gui_ret = gu_op(__suffix,                     \
                                (unsigned int __user *)__gui_ptr + 1);  \
                        __gui_inst = ppc_inst_prefix(__prefix,          \
                                                     __suffix);         \
                } else {                                                \
                        __gui_inst = ppc_inst(__prefix);                \
                }                                                       \
                if (__gui_ret == 0)                                     \
                        (dest) = __gui_inst;                            \
        }                                                               \
        __gui_ret;                                                      \
})

#define get_user_instr(x, ptr) \
        ___get_user_instr(get_user, x, ptr)

#define __get_user_instr(x, ptr) \
        ___get_user_instr(__get_user, x, ptr)

#define __get_user_instr_inatomic(x, ptr) \
        ___get_user_instr(__get_user_inatomic, x, ptr)

#else /* !CONFIG_PPC64 */
/* 32-bit: instructions are always a single word; read it into (x).val. */
#define get_user_instr(x, ptr) \
        get_user((x).val, (u32 __user *)(ptr))

#define __get_user_instr(x, ptr) \
        __get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true)

#define __get_user_instr_inatomic(x, ptr) \
        __get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32))

#endif /* CONFIG_PPC64 */
151
/* Link-time error trap for unsupported put_user sizes. */
extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
/*
 * Single store with exception-table fixup: on fault, the handler jumps
 * to label 3, which loads -EFAULT into err and resumes after the store.
 */
#define __put_user_asm(x, addr, err, op)                        \
        __asm__ __volatile__(                                   \
                "1:     " op " %1,0(%2) # put_user\n"           \
                "2:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "3:     li %0,%3\n"                             \
                "       b 2b\n"                                 \
                ".previous\n"                                   \
                EX_TABLE(1b, 3b)                                \
                : "=r" (err)                                    \
                : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)                         \
          __put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
/*
 * 32-bit 8-byte store: two stw's covering both halves of the register
 * pair ("%1+1" names the odd register of the 64-bit operand pair); a
 * fault in either store sets -EFAULT.
 */
#define __put_user_asm2(x, addr, err)                           \
        __asm__ __volatile__(                                   \
                "1:     stw %1,0(%2)\n"                         \
                "2:     stw %1+1,4(%2)\n"                       \
                "3:\n"                                          \
                ".section .fixup,\"ax\"\n"                      \
                "4:     li %0,%3\n"                             \
                "       b 3b\n"                                 \
                ".previous\n"                                   \
                EX_TABLE(1b, 4b)                                \
                EX_TABLE(2b, 4b)                                \
                : "=r" (err)                                    \
                : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
189
/*
 * Size-dispatched store; caller must already have opened user access
 * (KUAP).  Non-constant or unsupported sizes hit __put_user_bad(),
 * which fails at link time.
 */
#define __put_user_size_allowed(x, ptr, size, retval)           \
do {                                                            \
        retval = 0;                                             \
        switch (size) {                                         \
          case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
          case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
          case 4: __put_user_asm(x, ptr, retval, "stw"); break; \
          case 8: __put_user_asm2(x, ptr, retval); break;       \
          default: __put_user_bad();                            \
        }                                                       \
} while (0)

/* As above, but brackets the store with KUAP open/close. */
#define __put_user_size(x, ptr, size, retval)                   \
do {                                                            \
        allow_write_to_user(ptr, size);                         \
        __put_user_size_allowed(x, ptr, size, retval);          \
        prevent_write_to_user(ptr, size);                       \
} while (0)
208
/*
 * Unchecked put_user: no access_ok(), but still may sleep for genuine
 * user addresses (might_fault is skipped for kernel addresses so
 * KERNEL_DS users in atomic context don't trip the debug check).
 * Evaluates to 0 or -EFAULT.
 */
#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __pu_err;                                          \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
        __typeof__(*(ptr)) __pu_val = (x);                      \
        __typeof__(size) __pu_size = (size);                    \
                                                                \
        if (!is_kernel_addr((unsigned long)__pu_addr))          \
                might_fault();                                  \
        __chk_user_ptr(__pu_addr);                              \
        __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);      \
                                                                \
        __pu_err;                                               \
})

/*
 * Checked put_user: starts from -EFAULT and only stores (and clears the
 * error) when access_ok() passes.
 */
#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        long __pu_err = -EFAULT;                                        \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __typeof__(size) __pu_size = (size);                            \
                                                                        \
        might_fault();                                                  \
        if (access_ok(__pu_addr, __pu_size))                            \
                __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
                                                                        \
        __pu_err;                                                       \
})

/* Atomic-context put_user: no access_ok() and no might_fault(). */
#define __put_user_nosleep(x, ptr, size)                        \
({                                                              \
        long __pu_err;                                          \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
        __typeof__(*(ptr)) __pu_val = (x);                      \
        __typeof__(size) __pu_size = (size);                    \
                                                                \
        __chk_user_ptr(__pu_addr);                              \
        __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
                                                                \
        __pu_err;                                               \
})
250
251
/*
 * asm-goto store: instead of materialising an error code, a fault
 * branches straight to the caller-supplied label via the exception
 * table.  %U1/%X1 let gcc emit update/indexed addressing forms.
 */
#define __put_user_asm_goto(x, addr, label, op)                 \
        asm volatile goto(                                      \
                "1:     " op "%U1%X1 %0,%1      # put_user\n"   \
                EX_TABLE(1b, %l2)                               \
                :                                               \
                : "r" (x), "m" (*addr)                          \
                :                                               \
                : label)

#ifdef __powerpc64__
#define __put_user_asm2_goto(x, ptr, label)                     \
        __put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
/* 32-bit 8-byte asm-goto store: %L0/%L1 address the low word/halves. */
#define __put_user_asm2_goto(x, addr, label)                    \
        asm volatile goto(                                      \
                "1:     stw%X1 %0, %1\n"                        \
                "2:     stw%X1 %L0, %L1\n"                      \
                EX_TABLE(1b, %l2)                               \
                EX_TABLE(2b, %l2)                               \
                :                                               \
                : "r" (x), "m" (*addr)                          \
                :                                               \
                : label)
#endif /* __powerpc64__ */

/* Size dispatch for the asm-goto stores; faults jump to 'label'. */
#define __put_user_size_goto(x, ptr, size, label)               \
do {                                                            \
        switch (size) {                                         \
        case 1: __put_user_asm_goto(x, ptr, label, "stb"); break;       \
        case 2: __put_user_asm_goto(x, ptr, label, "sth"); break;       \
        case 4: __put_user_asm_goto(x, ptr, label, "stw"); break;       \
        case 8: __put_user_asm2_goto(x, ptr, label); break;     \
        default: __put_user_bad();                              \
        }                                                       \
} while (0)

/*
 * Unchecked asm-goto put_user.  Note: no KUAP open/close here, so this
 * must run inside a user_access_begin() section (see unsafe_put_user).
 */
#define __put_user_nocheck_goto(x, ptr, size, label)            \
do {                                                            \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
        if (!is_kernel_addr((unsigned long)__pu_addr))          \
                might_fault();                                  \
        __chk_user_ptr(ptr);                                    \
        __put_user_size_goto((x), __pu_addr, (size), label);    \
} while (0)
296
297
/* Link-time error trap for unsupported get_user sizes. */
extern long __get_user_bad(void);

/*
 * This does an atomic 128-bit (16 byte) aligned load from userspace
 * (one lvx/stvx VMX register transfer).
 * Up to caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)                \
        __asm__ __volatile__(                           \
                "1:     lvx  0,0,%1     # get user\n"   \
                "       stvx 0,0,%2     # put kernel\n" \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     li %0,%3\n"                     \
                "       b 2b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 3b)                        \
                : "=r" (err)                    \
                : "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

/*
 * Single load with fixup: on fault, err becomes -EFAULT and the
 * destination register is zeroed so no stale data leaks to the caller.
 */
#define __get_user_asm(x, addr, err, op)                \
        __asm__ __volatile__(                           \
                "1:     "op" %1,0(%2)   # get_user\n"   \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     li %0,%3\n"                     \
                "       li %1,0\n"                      \
                "       b 2b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 3b)                        \
                : "=r" (err), "=r" (x)                  \
                : "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)                   \
        __get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
/*
 * 32-bit 8-byte load via two lwz's into a register pair ("%1+1" is the
 * odd register); both halves are zeroed on fault.  "=&r" keeps the
 * destination out of the address register (early clobber).
 */
#define __get_user_asm2(x, addr, err)                   \
        __asm__ __volatile__(                           \
                "1:     lwz %1,0(%2)\n"                 \
                "2:     lwz %1+1,4(%2)\n"               \
                "3:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "4:     li %0,%3\n"                     \
                "       li %1,0\n"                      \
                "       li %1+1,0\n"                    \
                "       b 3b\n"                         \
                ".previous\n"                           \
                EX_TABLE(1b, 4b)                        \
                EX_TABLE(2b, 4b)                        \
                : "=r" (err), "=&r" (x)                 \
                : "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
350
/*
 * Size-dispatched load; caller must already have opened user access
 * (KUAP).  A destination smaller than 'size' or an unsupported size is
 * rejected at link time via __get_user_bad().
 */
#define __get_user_size_allowed(x, ptr, size, retval)           \
do {                                                            \
        retval = 0;                                             \
        __chk_user_ptr(ptr);                                    \
        if (size > sizeof(x))                                   \
                (x) = __get_user_bad();                         \
        switch (size) {                                         \
        case 1: __get_user_asm(x, ptr, retval, "lbz"); break;   \
        case 2: __get_user_asm(x, ptr, retval, "lhz"); break;   \
        case 4: __get_user_asm(x, ptr, retval, "lwz"); break;   \
        case 8: __get_user_asm2(x, ptr, retval);  break;        \
        default: (x) = __get_user_bad();                        \
        }                                                       \
} while (0)

/* As above, but brackets the load with KUAP open/close. */
#define __get_user_size(x, ptr, size, retval)                   \
do {                                                            \
        allow_read_from_user(ptr, size);                        \
        __get_user_size_allowed(x, ptr, size, retval);          \
        prevent_read_from_user(ptr, size);                      \
} while (0)
372
/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
/* Used so the temporary in __get_user_* can hold any supported size. */
#define __long_type(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
379
/*
 * Unchecked get_user.  barrier_nospec() fences speculative reads past a
 * (caller-performed) access check (Spectre v1).  'do_allow' selects
 * whether to open/close KUAP here (false inside user_access_begin
 * sections where it is already open).  Evaluates to 0 or -EFAULT.
 */
#define __get_user_nocheck(x, ptr, size, do_allow)                      \
({                                                              \
        long __gu_err;                                          \
        __long_type(*(ptr)) __gu_val;                           \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);   \
        __typeof__(size) __gu_size = (size);                    \
                                                                \
        __chk_user_ptr(__gu_addr);                              \
        if (!is_kernel_addr((unsigned long)__gu_addr))          \
                might_fault();                                  \
        barrier_nospec();                                       \
        if (do_allow)                                                           \
                __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);      \
        else                                                                    \
                __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val;                     \
                                                                \
        __gu_err;                                               \
})

/*
 * Checked get_user: __gu_val starts at 0 so the destination is written
 * with a defined value even when access_ok() fails.
 */
#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        long __gu_err = -EFAULT;                                        \
        __long_type(*(ptr)) __gu_val = 0;                               \
        __typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
        __typeof__(size) __gu_size = (size);                            \
                                                                        \
        might_fault();                                                  \
        if (access_ok(__gu_addr, __gu_size)) {                          \
                barrier_nospec();                                       \
                __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
        }                                                               \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
                                                                        \
        __gu_err;                                                       \
})

/* Atomic-context get_user: no access_ok() and no might_fault(). */
#define __get_user_nosleep(x, ptr, size)                        \
({                                                              \
        long __gu_err;                                          \
        __long_type(*(ptr)) __gu_val;                           \
        __typeof__(*(ptr)) __gu_addr = (ptr);   \
        __typeof__(size) __gu_size = (size);                    \
                                                                \
        __chk_user_ptr(__gu_addr);                              \
        barrier_nospec();                                       \
        __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
                                                                \
        __gu_err;                                               \
})
431
432
/* more complex routines */

/* Assembly bulk copier; returns the number of bytes NOT copied. */
extern unsigned long __copy_tofrom_user(void __user *to,
                const void __user *from, unsigned long size);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

/* Machine-check-safe kernel-to-kernel copy; returns bytes not copied. */
static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
        return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

/*
 * Machine-check-safe copy to userspace.  Returns the number of bytes
 * not copied (n unchanged if the size or access check fails).
 * NOTE(review): prevent_write_to_user() is called with the *updated* n
 * (bytes remaining), not the size passed to allow_write_to_user() —
 * looks intentional only if the KUP implementation ignores the size;
 * verify against asm/kup.h.
 */
static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
        if (likely(check_copy_size(from, n, true))) {
                if (access_ok(to, n)) {
                        allow_write_to_user(to, n);
                        n = copy_mc_generic((void *)to, from, n);
                        prevent_write_to_user(to, n);
                }
        }

        return n;
}
#endif
463
#ifdef __powerpc64__
/*
 * User-to-user copy (64-bit only).  Opens KUAP for both directions
 * around the bulk copier; returns the number of bytes not copied.
 * Caller (generic uaccess code) has already validated both ranges.
 */
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        unsigned long ret;

        barrier_nospec();       /* fence speculation past the access check */
        allow_read_write_user(to, from, n);
        ret = __copy_tofrom_user(to, from, n);
        prevent_read_write_user(to, from, n);
        return ret;
}
#endif /* __powerpc64__ */
477
/*
 * Copy n bytes from userspace; returns the number of bytes not copied.
 * Compile-time-constant sizes of 1/2/4/8 take an inlined single-access
 * fast path; anything else (or a faulting fast path) falls through to
 * the bulk copier.
 */
static inline unsigned long raw_copy_from_user(void *to,
                const void __user *from, unsigned long n)
{
        unsigned long ret;
        if (__builtin_constant_p(n) && (n <= 8)) {
                /* ret = 1 stays set only for n in {0,3,5,6,7}, forcing
                 * the slow path; __get_user_size overwrites it. */
                ret = 1;

                switch (n) {
                case 1:
                        barrier_nospec();
                        __get_user_size(*(u8 *)to, from, 1, ret);
                        break;
                case 2:
                        barrier_nospec();
                        __get_user_size(*(u16 *)to, from, 2, ret);
                        break;
                case 4:
                        barrier_nospec();
                        __get_user_size(*(u32 *)to, from, 4, ret);
                        break;
                case 8:
                        barrier_nospec();
                        __get_user_size(*(u64 *)to, from, 8, ret);
                        break;
                }
                if (ret == 0)
                        return 0;
        }

        /* Slow path: bulk copy with KUAP opened for reads. */
        barrier_nospec();
        allow_read_from_user(from, n);
        ret = __copy_tofrom_user((__force void __user *)to, from, n);
        prevent_read_from_user(from, n);
        return ret;
}
513
/*
 * Copy n bytes to userspace with KUAP already open (caller holds write
 * access).  Same constant-size fast path as raw_copy_from_user; returns
 * the number of bytes not copied.
 */
static inline unsigned long
raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
        if (__builtin_constant_p(n) && (n <= 8)) {
                /* 1 forces the slow path unless a case below runs. */
                unsigned long ret = 1;

                switch (n) {
                case 1:
                        __put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
                        break;
                case 2:
                        __put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
                        break;
                case 4:
                        __put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
                        break;
                case 8:
                        __put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
                        break;
                }
                if (ret == 0)
                        return 0;
        }

        return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
540
/*
 * Copy n bytes to userspace, opening/closing KUAP write access around
 * the copy.  Returns the number of bytes not copied.
 */
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        unsigned long ret;

        allow_write_to_user(to, n);
        ret = raw_copy_to_user_allowed(to, from, n);
        prevent_write_to_user(to, n);
        return ret;
}
551
/* Assembly zeroing primitive; returns the number of bytes not cleared. */
unsigned long __arch_clear_user(void __user *addr, unsigned long size);

/*
 * Zero a user range.  Returns 0 on success, or the number of bytes not
 * cleared (the whole size if access_ok() fails).
 */
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
        unsigned long ret = size;
        might_fault();
        if (likely(access_ok(addr, size))) {
                allow_write_to_user(addr, size);
                ret = __arch_clear_user(addr, size);
                prevent_write_to_user(addr, size);
        }
        return ret;
}
565
/*
 * "Unchecked" clear — on powerpc it simply delegates to clear_user(),
 * so the access_ok() check is performed anyway (harmlessly redundant).
 */
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
        return clear_user(addr, size);
}
570
571 extern long strncpy_from_user(char *dst, const char __user *src, long count);
572 extern __must_check long strnlen_user(const char __user *str, long n);
573
574 extern long __copy_from_user_flushcache(void *dst, const void __user *src,
575                 unsigned size);
576 extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
577                            size_t len);
578
579 static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
580 {
581         if (unlikely(!access_ok(ptr, len)))
582                 return false;
583         allow_read_write_user((void __user *)ptr, ptr, len);
584         return true;
585 }
#define user_access_begin       user_access_begin
/* Close / save / restore the KUAP user-access window. */
#define user_access_end         prevent_current_access_user
#define user_access_save        prevent_user_access_return
#define user_access_restore     restore_user_access
590
/*
 * Open a read-only user-access window.  Returns false (window closed)
 * if the range fails access_ok().
 */
static __must_check inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr, len)))
                return false;
        allow_read_from_user(ptr, len);
        return true;
}
#define user_read_access_begin  user_read_access_begin
#define user_read_access_end            prevent_current_read_from_user
601
/*
 * Open a write-only user-access window.  Returns false (window closed)
 * if the range fails access_ok().
 */
static __must_check inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
        if (unlikely(!access_ok(ptr, len)))
                return false;
        allow_write_to_user((void __user *)ptr, len);
        return true;
}
#define user_write_access_begin user_write_access_begin
#define user_write_access_end           prevent_current_write_to_user
612
/*
 * "unsafe" accessors for use between user_access_begin()/end(): KUAP is
 * already open, and any fault branches to the caller's error label.
 */
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)
616
/*
 * Inline copy-out for user_access_begin() sections: longs first, then a
 * trailing u32 (64-bit only, where long is 8 bytes), u16 and u8 to
 * cover the remainder.  Any faulting store branches to label 'e'.
 */
#define unsafe_copy_to_user(d, s, l, e) \
do {                                                                    \
        u8 __user *_dst = (u8 __user *)(d);                             \
        const u8 *_src = (const u8 *)(s);                               \
        size_t _len = (l);                                              \
        int _i;                                                         \
                                                                        \
        for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long))             \
                __put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);\
        if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) {                   \
                __put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e);      \
                _i += 4;                                                \
        }                                                               \
        if (_len & 2) {                                                 \
                __put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e);      \
                _i += 2;                                                \
        }                                                               \
        if (_len & 1) \
                __put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\
} while (0)
637
638 #endif  /* _ARCH_POWERPC_UACCESS_H */