/*
 *  arch/arm/include/asm/uaccess.h
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertently
 * perform such accesses (e.g. via list poison values) which could then
 * be exploited for privilege escalation.
 */
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
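
/*
 * Illustrative sketch (not a new API, just how the accessors in this
 * file use the pair above): every userspace access is bracketed as
 *
 *	unsigned int __ua_flags = uaccess_save_and_enable();
 *	...userspace load/store via the uaccess asm helpers...
 *	uaccess_restore(__ua_flags);
 *
 * so that, with CONFIG_CPU_SW_DOMAIN_PAN, user memory is only ever
 * accessible inside such a window.
 */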

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS	0x00000000
#define get_ds()	(KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a) == (b))
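
/*
 * Usage sketch (illustrative only): code that must feed a kernel buffer
 * to an interface taking a __user pointer temporarily widens the limit,
 * and must always restore it:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	...user accessors may now reach kernel addresses...
 *	set_fs(old_fs);
 *
 * TIF_FSCHECK (set above) lets the return-to-user path catch a missing
 * restore.
 */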

/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr);	\
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s)				\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
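
/*
 * Calling convention sketch for the out-of-line __get_user_<size>
 * helpers: the user pointer goes in r0 and the address limit in r1; on
 * return r0 holds 0 or -EFAULT and r2 holds the loaded value.  The
 * __asmeq() markers are build-time assertions that the compiler really
 * did honour the fixed register allocation.
 */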

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)			\
	__asm__ __volatile__ (						\
		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
		__asmeq("%3", "r1")					\
		"bl	__get_user_64t_" #__s				\
		: "=&r" (__e), "=r" (__r2)				\
		: "0" (__p), "r" (__l)					\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif

#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register const typeof(*(p)) __user *__p asm("r0") = (p);\
		register typeof(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	})
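
/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	u32 val;
 *	u32 __user *uptr;
 *	int err;
 *
 *	err = get_user(val, uptr);	// 0 on success, -EFAULT on fault
 *	if (err)
 *		return err;
 *
 * On a fault the destination is zeroed by the fixup code, so a caller
 * that forgets to check "err" still cannot observe stale kernel data.
 */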

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})

#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a, b)	(1)
#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : get_fs())

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to -EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
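
/*
 * Convention sketch (illustrative): validate once, then use the cheap
 * unchecked forms for a series of accesses:
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	err = __get_user(val, uptr);
 */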
#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), err);				\
	(void) 0;							\
})

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err); break;	\
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err); break;	\
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err); break;	\
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)

#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

#define __get_user_asm_byte(x, addr, err)			\
	__get_user_asm(x, addr, err, ldrb)
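
/*
 * Informal sketch of the fixup flow above: the __ex_table entry pairs
 * the possibly-faulting load at label 1 with the handler at label 3 in
 * .text.fixup.  If the load faults, the exception code jumps to 3,
 * which sets err to -EFAULT, zeroes the destination so no stale kernel
 * data can leak, and branches back to label 2 to resume.
 */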

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err)			\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#define __get_user_asm_word(x, addr, err)			\
	__get_user_asm(x, addr, err, ldr)

#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2: __fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4: __fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8: __fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)

#define put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_check);	\
	__pu_err;							\
})

#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_switch((x), (ptr), (err), __put_user_nocheck);	\
	(void) 0;							\
})
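
/*
 * Usage sketch (hypothetical caller): put_user() checks the address
 * itself and returns 0 or -EFAULT; the __-prefixed forms assume the
 * caller already called access_ok():
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */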

#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err);	\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, strb)

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err);		\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
})
#else
#define __put_user_asm_half(x, __pu_addr, err)			\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
})
#endif

#define __put_user_asm_word(x, __pu_addr, err)			\
	__put_user_asm(x, __pu_addr, err, str)

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err)			\
	__asm__ __volatile__(					\
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) " " __reg_oper0 ", [%1]\n"		) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) " " __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else /* !CONFIG_MMU */

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}

#define __clear_user(addr, n)	(memset((void __force *)addr, 0, n), 0)
#endif /* !CONFIG_MMU */

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
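
/*
 * Usage sketch (illustrative): generic code goes through the
 * copy_{to,from}_user() wrappers built on the raw_* helpers above;
 * they return the number of bytes that could NOT be copied:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */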

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */