include/asm-x86/uaccess_64.h: checkpatch cleanups - formatting only
#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS         MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))
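
/*
 * Illustrative pattern only (not defined here): the addr_limit check can
 * be lifted around a routine that expects a __user pointer by switching
 * segments.  "old_fs", "kbuf", "file" and "pos" are hypothetical locals,
 * and vfs_read() is just one plausible callee:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = vfs_read(file, (char __user *)kbuf, len, &pos);
 *	set_fs(old_fs);
 */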

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) &                      \
                           (current_thread_info()->addr_limit.seg)))

/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr, size)                                      \
({                                                                      \
        unsigned long flag, roksum;                                     \
        __chk_user_ptr(addr);                                           \
        asm("# range_ok\n\t"                                            \
            "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"         \
            : "=&r" (flag), "=r" (roksum)                               \
            : "1" (addr), "g" ((long)(size)),                           \
              "g" (current_thread_info()->addr_limit.seg));             \
        flag;                                                           \
})
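
/*
 * A rough C rendering of what the asm above computes (a sketch, with
 * "limit" standing for current_thread_info()->addr_limit.seg):
 *
 *	unsigned long sum = addr + size;  // addq; may carry out of bit 63
 *	flag = carry ? ~0UL : 0;          // sbbq %0,%0 turns the carry
 *	                                  // into an all-ones mask
 *	flag -= (limit < sum);            // cmpq/sbbq $0: borrow if the
 *	                                  // range ends past the limit
 *
 * so flag is non-zero exactly when addr + size wraps around or runs
 * beyond addr_limit -- the 65-bit carry mentioned above.
 */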

#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned long insn, fixup;
};
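
/*
 * For illustration only: _ASM_EXTABLE(1b, 3b), used by the asm further
 * down, emits one such pair into the __ex_table section, roughly:
 *
 *	.section __ex_table, "a"
 *	.align 8
 *	.quad 1b, 3b		# faulting insn, fixup address
 *	.previous
 *
 * (The real macro lives in <asm/asm.h>; the layout here is a sketch.)
 */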

extern int fixup_exception(struct pt_regs *regs);

#define ARCH_HAS_SEARCH_EXTABLE

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size, ret, x, ptr)               \
        asm volatile("call __get_user_" #size         \
                     : "=a" (ret), "=d" (x)           \
                     : "c" (ptr)                      \
                     : "r8")

/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 */
#define get_user(x, ptr)                                                \
({                                                                      \
        unsigned long __val_gu;                                         \
        int __ret_gu;                                                   \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __get_user_x(1, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 2:                                                         \
                __get_user_x(2, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 4:                                                         \
                __get_user_x(4, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        case 8:                                                         \
                __get_user_x(8, __ret_gu, __val_gu, ptr);               \
                break;                                                  \
        default:                                                        \
                __get_user_bad();                                       \
                break;                                                  \
        }                                                               \
        (x) = (__force typeof(*(ptr)))__val_gu;                         \
        __ret_gu;                                                       \
})
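
/*
 * Illustrative use (hypothetical "uptr", "uarr" and "val" names):
 * get_user() checks the address itself, while the __-prefixed variants
 * rely on one prior access_ok() covering all the accesses:
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *
 *	if (!access_ok(VERIFY_WRITE, uarr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	if (__put_user(val, uarr) || __put_user(val + 1, uarr + 1))
 *		return -EFAULT;
 */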

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size, ret, x, ptr)                                 \
        asm volatile("call __put_user_" #size                           \
                     : "=a" (ret)                                       \
                     : "c" (ptr), "d" (x)                               \
                     : "r8")

#define put_user(x, ptr)                                                \
        __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

#define __put_user_nocheck(x, ptr, size)                \
({                                                      \
        int __pu_err;                                   \
        __put_user_size((x), (ptr), (size), __pu_err);  \
        __pu_err;                                       \
})

#define __put_user_check(x, ptr, size)                          \
({                                                              \
        int __pu_err;                                           \
        typeof(*(ptr)) __user *__pu_addr = (ptr);               \
        switch (size) {                                         \
        case 1:                                                 \
                __put_user_x(1, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_err, x, __pu_addr);        \
                break;                                          \
        case 8:                                                 \
                __put_user_x(8, __pu_err, x, __pu_addr);        \
                break;                                          \
        default:                                                \
                __put_user_bad();                               \
        }                                                       \
        __pu_err;                                               \
})

#define __put_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
                break;                                                  \
        case 8:                                                         \
                __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT); \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)
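
/*
 * Reader's note on the string arguments above (a description, not new
 * API): "itype" is the mov size suffix (movb/movw/movl/movq), "rtype"
 * the operand-size modifier for the register name (%b1, %w1, %k1, %1),
 * and "ltype" the gcc constraint used for the value being stored.
 */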

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
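
/*
 * What the hack buys us (descriptive note): casting the user pointer to
 * a pointer to this oversized struct lets the "m" (__m(addr)) operands
 * below stand for "some bytes at this address" without gcc assuming a
 * particular access size; 100 longs is an arbitrary upper bound, hence
 * the FIXME.
 */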

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile("1:        mov"itype" %"rtype"1,%2\n"              \
                     "2:\n"                                             \
                     ".section .fixup, \"ax\"\n"                        \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err)                                       \
                     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
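
/*
 * Operand map for __put_user_asm() (reference only): %0 is "err" (also
 * tied in as input "0"), %1 the value in an "ltype" register, %2 the
 * user memory slot, and %3 the errno constant that the fixup at label 3
 * loads into %0 when the store at label 1 faults.
 */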

#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        int __gu_err;                                           \
        unsigned long __gu_val;                                 \
        __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
        (x) = (__force typeof(*(ptr)))__gu_val;                 \
        __gu_err;                                               \
})

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)                           \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
                break;                                                  \
        case 8:                                                         \
                __get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT); \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)        \
        asm volatile("1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup, \"ax\"\n"                        \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype (x)                            \
                     : "m" (__m(addr)), "i" (errno), "0" (err))

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
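
/*
 * Illustrative caller (hypothetical "struct foo_args", "arg" and
 * FOO_VALID_MASK names): the non-__ routines do their own access_ok()
 * and return the number of bytes that could not be copied, 0 on success:
 *
 *	struct foo_args kargs;
 *
 *	if (copy_from_user(&kargs, (void __user *)arg, sizeof(kargs)))
 *		return -EFAULT;
 *	kargs.flags &= FOO_VALID_MASK;
 *	if (copy_to_user((void __user *)arg, &kargs, sizeof(kargs)))
 *		return -EFAULT;
 */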

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:
                __get_user_asm(*(u8 *)dst, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                return ret;
        case 2:
                __get_user_asm(*(u16 *)dst, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                return ret;
        case 4:
                __get_user_asm(*(u32 *)dst, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                return ret;
        case 8:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                return ret;
        case 10:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u16 *)(8 + (char *)dst),
                               (u16 __user *)(8 + (char __user *)src),
                               ret, "w", "w", "=r", 2);
                return ret;
        case 16:
                __get_user_asm(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (unlikely(ret))
                        return ret;
                __get_user_asm(*(u64 *)(8 + (char *)dst),
                               (u64 __user *)(8 + (char __user *)src),
                               ret, "q", "", "=r", 8);
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:
                __put_user_asm(*(u8 *)src, (u8 __user *)dst,
                               ret, "b", "b", "iq", 1);
                return ret;
        case 2:
                __put_user_asm(*(u16 *)src, (u16 __user *)dst,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 4:
                __put_user_asm(*(u32 *)src, (u32 __user *)dst,
                               ret, "l", "k", "ir", 4);
                return ret;
        case 8:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 8);
                return ret;
        case 10:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 10);
                if (unlikely(ret))
                        return ret;
                asm("" : : : "memory");
                __put_user_asm(*((u16 *)src + 4), (u16 __user *)dst + 4,
                               ret, "w", "w", "ir", 2);
                return ret;
        case 16:
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "ir", 16);
                if (unlikely(ret))
                        return ret;
                asm("" : : : "memory");
                __put_user_asm(*((u64 *)src + 1), (u64 __user *)dst + 1,
                               ret, "q", "", "ir", 8);
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
        int ret = 0;
        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        switch (size) {
        case 1: {
                u8 tmp;
                __get_user_asm(tmp, (u8 __user *)src,
                               ret, "b", "b", "=q", 1);
                if (likely(!ret))
                        __put_user_asm(tmp, (u8 __user *)dst,
                                       ret, "b", "b", "iq", 1);
                return ret;
        }
        case 2: {
                u16 tmp;
                __get_user_asm(tmp, (u16 __user *)src,
                               ret, "w", "w", "=r", 2);
                if (likely(!ret))
                        __put_user_asm(tmp, (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                return ret;
        }
        case 4: {
                u32 tmp;
                __get_user_asm(tmp, (u32 __user *)src,
                               ret, "l", "k", "=r", 4);
                if (likely(!ret))
                        __put_user_asm(tmp, (u32 __user *)dst,
                                       ret, "l", "k", "ir", 4);
                return ret;
        }
        case 8: {
                u64 tmp;
                __get_user_asm(tmp, (u64 __user *)src,
                               ret, "q", "", "=r", 8);
                if (likely(!ret))
                        __put_user_asm(tmp, (u64 __user *)dst,
                                       ret, "q", "", "ir", 8);
                return ret;
        }
        default:
                return copy_user_generic((__force void *)dst,
                                         (__force void *)src, size);
        }
}

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);

__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
                                            unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
        return copy_user_generic((__force void *)dst, src, size);
}

#define ARCH_HAS_NOCACHE_UACCESS 1
extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);
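
/*
 * Our reading of the wrappers below (not a documented contract):
 * "zerorest" selects whether the uncopied tail of the destination is
 * zeroed when the copy faults part-way; the sleeping variant asks for
 * that, the _inatomic one does not.
 */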

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
                                           unsigned size)
{
        might_sleep();
        return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
                                                    const void __user *src,
                                                    unsigned size)
{
        return __copy_user_nocache(dst, src, size, 0);
}

#endif /* __X86_64_UACCESS_H */