/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS       ((mm_segment_t) { 0UL })
#define USER_DS         ((mm_segment_t) { -0x40000000000UL })

#define get_fs()  (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)

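/*
 * Hedged worked example (comment only, not compiled): USER_DS above is
 * -0x40000000000UL, i.e. every bit at and above bit 42 set.  A typical
 * user address such as 0x120001000 has none of those bits set, so
 * masking it with get_fs().seg yields 0 and the access is allowed; a
 * kernel-segment address like 0xfffffc0000310000 keeps some masked
 * bits and fails.  With KERNEL_DS (mask 0), every address passes.
 */
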
/*
 * Is an address valid? This does a straightforward calculation rather
 * than a series of tests.
 *
 * An address is valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size-(size != 0)" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr, size) ({                              \
        unsigned long __ao_a = (addr), __ao_b = (size);         \
        unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b;    \
        (get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; })

#define access_ok(addr, size)                           \
({                                                      \
        __chk_user_ptr(addr);                           \
        __access_ok(((unsigned long)(addr)), (size));   \
})

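/*
 * Hedged worked example (comment only): the "- !!__ao_b" turns
 * [addr, addr+size) into the address of the last byte actually
 * touched, and is a no-op for size == 0.  With USER_DS,
 * addr = 0x3ffffffffff (the last user byte) and size = 2 give
 * __ao_end = 0x40000000000; bit 42 of the end address trips the mask
 * even though addr and size individually look fine, so ranges that
 * cross the limit are rejected without any comparisons.
 */
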
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) \
  __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) \
  __get_user_check((x), (ptr), sizeof(*(ptr)))

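/*
 * Hedged usage sketch (comment only; "uptr" is a hypothetical
 * "int __user *" argument).  Both macros return 0 on success and
 * -EFAULT when the user address is invalid or faults:
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */
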
/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
  __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

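/*
 * Hedged usage sketch (comment only; "uarr", "tmp", "n" and "i" are
 * hypothetical): check the whole range once, then use the unchecked
 * accessors inside the loop:
 *
 *	if (!access_ok(uarr, n * sizeof(int)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__get_user(tmp[i], uarr + i))
 *			return -EFAULT;
 */
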
/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */
#define EXC(label,cont,res,err)                         \
        ".section __ex_table,\"a\"\n"                   \
        "       .long "#label"-.\n"                     \
        "       lda "#res","#cont"-"#label"("#err")\n"  \
        ".previous\n"

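/*
 * Sketch of how the entry is decoded (my reading, based on the Alpha
 * memory-format instruction layout): the "lda" is never executed.  Its
 * Ra field names the register to zero on a fault ("res", the value
 * register), its Rb field names the register that receives -EFAULT
 * ("err"), and its 16-bit displacement holds the offset from the
 * faulting instruction to the continuation label, where execution
 * resumes.  $31 in either register field means "no register".
 */
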
extern void __get_user_unknown(void);

#define __get_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __gu_err = 0;                                      \
        unsigned long __gu_val;                                 \
        __chk_user_ptr(ptr);                                    \
        switch (size) {                                         \
          case 1: __get_user_8(ptr); break;                     \
          case 2: __get_user_16(ptr); break;                    \
          case 4: __get_user_32(ptr); break;                    \
          case 8: __get_user_64(ptr); break;                    \
          default: __get_user_unknown(); break;                 \
        }                                                       \
        (x) = (__force __typeof__(*(ptr))) __gu_val;            \
        __gu_err;                                               \
})

#define __get_user_check(x, ptr, size)                          \
({                                                              \
        long __gu_err = -EFAULT;                                \
        unsigned long __gu_val = 0;                             \
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr);     \
        if (__access_ok((unsigned long)__gu_addr, size)) {      \
                __gu_err = 0;                                   \
                switch (size) {                                 \
                  case 1: __get_user_8(__gu_addr); break;       \
                  case 2: __get_user_16(__gu_addr); break;      \
                  case 4: __get_user_32(__gu_addr); break;      \
                  case 8: __get_user_64(__gu_addr); break;      \
                  default: __get_user_unknown(); break;         \
                }                                               \
        }                                                       \
        (x) = (__force __typeof__(*(ptr))) __gu_val;            \
        __gu_err;                                               \
})

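/*
 * __m() casts an address to a dummy "large struct" so that the "m"
 * constraints below cover a generously sized region rather than a
 * single scalar; this is the usual idiom for telling gcc that the
 * inline asm really accesses the memory behind the pointer.
 */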
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_64(addr)                             \
        __asm__("1: ldq %0,%2\n"                        \
        "2:\n"                                          \
        EXC(1b,2b,%0,%1)                                \
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)                             \
        __asm__("1: ldl %0,%2\n"                        \
        "2:\n"                                          \
        EXC(1b,2b,%0,%1)                                \
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)                             \
        __asm__("1: ldwu %0,%2\n"                       \
        "2:\n"                                          \
        EXC(1b,2b,%0,%1)                                \
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)                              \
        __asm__("1: ldbu %0,%2\n"                       \
        "2:\n"                                          \
        EXC(1b,2b,%0,%1)                                \
                : "=r"(__gu_val), "=r"(__gu_err)        \
                : "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */

#define __get_user_16(addr)                                             \
{                                                                       \
        long __gu_tmp;                                                  \
        __asm__("1: ldq_u %0,0(%3)\n"                                   \
        "2:     ldq_u %1,1(%3)\n"                                       \
        "       extwl %0,%3,%0\n"                                       \
        "       extwh %1,%3,%1\n"                                       \
        "       or %0,%1,%0\n"                                          \
        "3:\n"                                                          \
        EXC(1b,3b,%0,%2)                                                \
        EXC(2b,3b,%0,%2)                                                \
                : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)      \
                : "r"(addr), "2"(__gu_err));                            \
}

#define __get_user_8(addr)                                              \
        __asm__("1: ldq_u %0,0(%2)\n"                                   \
        "       extbl %0,%2,%0\n"                                       \
        "2:\n"                                                          \
        EXC(1b,2b,%0,%1)                                                \
                : "=&r"(__gu_val), "=r"(__gu_err)                       \
                : "r"(addr), "1"(__gu_err))
#endif

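/*
 * Sketch of the unaligned load sequence above (my reading of the
 * Alpha insns): ldq_u fetches the aligned quadword containing the
 * byte at the given address, extwl/extwh shift the two halves of the
 * 16-bit value into place based on the low address bits, and "or"
 * merges them.  Two loads are needed because an unaligned word may
 * straddle a quadword boundary.
 */
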
extern void __put_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __pu_err = 0;                                      \
        __chk_user_ptr(ptr);                                    \
        switch (size) {                                         \
          case 1: __put_user_8(x, ptr); break;                  \
          case 2: __put_user_16(x, ptr); break;                 \
          case 4: __put_user_32(x, ptr); break;                 \
          case 8: __put_user_64(x, ptr); break;                 \
          default: __put_user_unknown(); break;                 \
        }                                                       \
        __pu_err;                                               \
})

#define __put_user_check(x, ptr, size)                          \
({                                                              \
        long __pu_err = -EFAULT;                                \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
        if (__access_ok((unsigned long)__pu_addr, size)) {      \
                __pu_err = 0;                                   \
                switch (size) {                                 \
                  case 1: __put_user_8(x, __pu_addr); break;    \
                  case 2: __put_user_16(x, __pu_addr); break;   \
                  case 4: __put_user_32(x, __pu_addr); break;   \
                  case 8: __put_user_64(x, __pu_addr); break;   \
                  default: __put_user_unknown(); break;         \
                }                                               \
        }                                                       \
        __pu_err;                                               \
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues.
 */
#define __put_user_64(x, addr)                                  \
__asm__ __volatile__("1: stq %r2,%1\n"                          \
        "2:\n"                                                  \
        EXC(1b,2b,$31,%0)                                       \
                : "=r"(__pu_err)                                \
                : "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x, addr)                                  \
__asm__ __volatile__("1: stl %r2,%1\n"                          \
        "2:\n"                                                  \
        EXC(1b,2b,$31,%0)                                       \
                : "=r"(__pu_err)                                \
                : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x, addr)                                  \
__asm__ __volatile__("1: stw %r2,%1\n"                          \
        "2:\n"                                                  \
        EXC(1b,2b,$31,%0)                                       \
                : "=r"(__pu_err)                                \
                : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x, addr)                                   \
__asm__ __volatile__("1: stb %r2,%1\n"                          \
        "2:\n"                                                  \
        EXC(1b,2b,$31,%0)                                       \
                : "=r"(__pu_err)                                \
                : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */

#define __put_user_16(x, addr)                                  \
{                                                               \
        long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;        \
        __asm__ __volatile__(                                   \
        "1:     ldq_u %2,1(%5)\n"                               \
        "2:     ldq_u %1,0(%5)\n"                               \
        "       inswh %6,%5,%4\n"                               \
        "       inswl %6,%5,%3\n"                               \
        "       mskwh %2,%5,%2\n"                               \
        "       mskwl %1,%5,%1\n"                               \
        "       or %2,%4,%2\n"                                  \
        "       or %1,%3,%1\n"                                  \
        "3:     stq_u %2,1(%5)\n"                               \
        "4:     stq_u %1,0(%5)\n"                               \
        "5:\n"                                                  \
        EXC(1b,5b,$31,%0)                                       \
        EXC(2b,5b,$31,%0)                                       \
        EXC(3b,5b,$31,%0)                                       \
        EXC(4b,5b,$31,%0)                                       \
                : "=r"(__pu_err), "=&r"(__pu_tmp1),             \
                  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),           \
                  "=&r"(__pu_tmp4)                              \
                : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x, addr)                                   \
{                                                               \
        long __pu_tmp1, __pu_tmp2;                              \
        __asm__ __volatile__(                                   \
        "1:     ldq_u %1,0(%4)\n"                               \
        "       insbl %3,%4,%2\n"                               \
        "       mskbl %1,%4,%1\n"                               \
        "       or %1,%2,%1\n"                                  \
        "2:     stq_u %1,0(%4)\n"                               \
        "3:\n"                                                  \
        EXC(1b,3b,$31,%0)                                       \
        EXC(2b,3b,$31,%0)                                       \
                : "=r"(__pu_err),                               \
                  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)            \
                : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif
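
/*
 * Sketch of the unaligned store sequence above (my reading): it is a
 * read-modify-write.  Both quadwords that might hold the target bytes
 * are loaded with ldq_u, inswl/inswh position the new value,
 * mskwl/mskwh clear the old bytes, "or" merges new into old, and two
 * stq_u instructions write the results back.  Any of the four memory
 * operations can fault, hence the four EXC entries.
 */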


/*
 * Complex access routines
 */

extern long __copy_user(void *to, const void *from, long len);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
        return __copy_user(to, (__force const void *)from, len);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{
        return __copy_user((__force void *)to, from, len);
}

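/*
 * Hedged usage sketch (comment only; "foo" and "uarg" are
 * hypothetical): callers normally go through the generic
 * copy_from_user()/copy_to_user() wrappers in <linux/uaccess.h>,
 * which add the access_ok() check around these raw routines.  All of
 * them return the number of bytes that could NOT be copied, so zero
 * means success:
 *
 *	struct foo karg;
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */
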
extern long __clear_user(void __user *to, long len);

extern inline long
clear_user(void __user *to, long len)
{
        if (__access_ok((unsigned long)to, len))
                len = __clear_user(to, len);
        return len;
}

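/*
 * Hedged note: like the copy routines, clear_user() returns the
 * number of bytes that could not be zeroed, so
 * "if (clear_user(ubuf, len)) return -EFAULT;" is the usual calling
 * pattern ("ubuf" is hypothetical).
 */
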
#define user_addr_max() \
        (uaccess_kernel() ? ~0UL : TASK_SIZE)

extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#include <asm/extable.h>

#endif /* __ALPHA_UACCESS_H */