/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/asm-compat.h>
#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return check addr_limit (fs) is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

static inline int __access_ok(unsigned long addr, unsigned long size,
			mm_segment_t seg)
{
	if (addr > seg.seg)
		return 0;
	return (size == 0 || size - 1 <= seg.seg - addr);
}

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
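
/*
 * Usage sketch (illustrative, not part of the original header): a caller
 * validates a user pointer once with access_ok(), then relies on the
 * unchecked __get_user()/__put_user() variants defined below.  The helper
 * name and calling context here are hypothetical.
 *
 *	static int example_clear_flag(u32 __user *uptr)
 *	{
 *		u32 val;
 *
 *		if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		if (__get_user(val, uptr))
 *			return -EFAULT;
 *		return __put_user(val & ~1u, uptr);
 *	}
 */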

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
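
/*
 * Usage sketch (illustrative, not part of the original header): the checked
 * get_user()/put_user() forms perform their own access_ok() and evaluate to
 * 0 on success or -EFAULT on failure, so a typical handler (hypothetical
 * names) reduces to:
 *
 *	static long example_ioctl_get(u64 __user *argp, u64 counter)
 *	{
 *		return put_user(counter, argp);	// 0 or -EFAULT
 *	}
 */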

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 4b)				\
		EX_TABLE(2b, 4b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	might_fault();							\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})

extern long __get_user_bad(void);

/*
 * This does an atomic, 128-bit (16-byte aligned) load from userspace.
 * Up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)	\
	__asm__ __volatile__(				\
		"1:	lvx  0,0,%1	# get user\n"	\
		"	stvx 0,0,%2	# put kernel\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_fault();					\
	barrier_nospec();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	might_fault();							\
	if (access_ok(VERIFY_READ, __gu_addr, (size))) {		\
		barrier_nospec();					\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	barrier_nospec();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})
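
/*
 * Usage sketch (illustrative, not part of the original header): the
 * _inatomic/_nosleep forms skip might_fault() and are meant for contexts
 * that cannot sleep; callers typically disable page faults first and fall
 * back to a sleeping path on failure.  Names below are assumed.
 *
 *	u32 val;
 *	int err;
 *
 *	pagefault_disable();
 *	err = __get_user_inatomic(val, uptr);
 *	pagefault_enable();
 *	if (err)
 *		return -EFAULT;		// retry from a sleepable context
 */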

/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	return __copy_tofrom_user(to, from, n);
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			barrier_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			barrier_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			barrier_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			barrier_nospec();
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	barrier_nospec();
	return __copy_tofrom_user((__force void __user *)to, from, n);
}

static inline unsigned long raw_copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
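
/*
 * Note (illustrative, not part of the original header): these raw helpers
 * are the arch backend for the generic copy_from_user()/copy_to_user() in
 * include/linux/uaccess.h, which add the access_ok() check and zero the
 * uncopied tail of the destination on a faulting read.  A typical caller
 * (hypothetical struct name) therefore writes:
 *
 *	struct example_args args;
 *
 *	if (copy_from_user(&args, uargp, sizeof(args)))
 *		return -EFAULT;
 */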

extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		return __clear_user(addr, size);
	return size;
}
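
/*
 * Usage sketch (illustrative, not part of the original header): clear_user()
 * returns the number of bytes that could NOT be zeroed, so 0 means success.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */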

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
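
/*
 * Usage sketch (illustrative, not part of the original header):
 * strncpy_from_user() returns the string length on success (not counting
 * the terminating NUL), @count if the source was not NUL-terminated within
 * @count bytes, or -EFAULT; unlike strncpy() it does not zero-pad the tail.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */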

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		size_t size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len);

#endif	/* _ARCH_POWERPC_UACCESS_H */