/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
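
/*
 * The three variants (see arch/x86/lib/copy_user_64.S) trade setup
 * cost against throughput: copy_user_generic_unrolled() is an
 * open-coded move loop, copy_user_generic_string() uses REP MOVSQ
 * plus a byte tail, and copy_user_enhanced_fast_string() is a single
 * REP MOVSB for CPUs advertising Enhanced REP MOVSB/STOSB (ERMS).
 */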

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
        unsigned ret;

        /*
         * If the CPU has the ERMS feature, use copy_user_enhanced_fast_string.
         * Otherwise, if it has the REP_GOOD feature, use copy_user_generic_string.
         * Otherwise, use copy_user_generic_unrolled.
         *
         * The call target is rewritten at boot by the alternatives
         * patching code, so the selection costs nothing at runtime.
         * to/from/len are listed as outputs tied to %rdi/%rsi/%rdx
         * because the called routines clobber their argument registers.
         */
        alternative_call_2(copy_user_generic_unrolled,
                         copy_user_generic_string,
                         X86_FEATURE_REP_GOOD,
                         copy_user_enhanced_fast_string,
                         X86_FEATURE_ERMS,
                         ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
                                     "=d" (len)),
                         "1" (to), "2" (from), "3" (len)
                         : "memory", "rcx", "r8", "r9", "r10", "r11");
        return ret;
}

static __always_inline __must_check unsigned long
copy_to_user_mcsafe(void *to, const void *from, unsigned len)
{
        unsigned long ret;

        __uaccess_begin();
        /*
         * Note, __memcpy_mcsafe() is explicitly used since it can
         * handle exceptions / faults.  memcpy_mcsafe() may fall back to
         * memcpy() which lacks this handling.
         */
        ret = __memcpy_mcsafe(to, from, len);
        __uaccess_end();
        return ret;
}
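
/*
 * copy_to_user_mcsafe() is the machine-check-safe path for copying to
 * userspace when the kernel-side source may be poisoned memory (e.g.
 * pmem read via the iov_iter machinery).  Like the raw_*() helpers
 * below, it does not perform access_ok(); that check is the caller's
 * responsibility.
 */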

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
        int ret = 0;

        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        /*
         * Constant sizes up to 16 bytes are open-coded.  Each access is
         * bracketed by __uaccess_begin_nospec(), which includes the
         * Spectre-v1 barrier required before a speculative load from a
         * user-supplied pointer.
         */
        switch (size) {
        case 1:
                __uaccess_begin_nospec();
                __get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
                              ret, "b", "b", "=q", 1);
                __uaccess_end();
                return ret;
        case 2:
                __uaccess_begin_nospec();
                __get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
                              ret, "w", "w", "=r", 2);
                __uaccess_end();
                return ret;
        case 4:
                __uaccess_begin_nospec();
                __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
                              ret, "l", "k", "=r", 4);
                __uaccess_end();
                return ret;
        case 8:
                __uaccess_begin_nospec();
                __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
                              ret, "q", "", "=r", 8);
                __uaccess_end();
                return ret;
        case 10:
                __uaccess_begin_nospec();
                __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (likely(!ret))
                        __get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
                                       (u16 __user *)(8 + (char __user *)src),
                                       ret, "w", "w", "=r", 2);
                __uaccess_end();
                return ret;
        case 16:
                __uaccess_begin_nospec();
                __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (likely(!ret))
                        __get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
                                       (u64 __user *)(8 + (char __user *)src),
                                       ret, "q", "", "=r", 8);
                __uaccess_end();
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}
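
/*
 * Example (hypothetical caller, not part of this header): for a
 * compile-time-constant size the switch above collapses to a single
 * inlined access, e.g.
 *
 *	u64 val;
 *	if (raw_copy_from_user(&val, ubuf, sizeof(val)))
 *		return -EFAULT;
 *
 * Normal kernel code should go through copy_from_user(), which adds
 * the access_ok() range check before dispatching here.
 */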

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
        int ret = 0;

        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        /*
         * The store side uses plain __uaccess_begin(): the Spectre-v1
         * barrier in __uaccess_begin_nospec() guards speculative loads
         * from a user-controlled address, which is not a concern when
         * only writing to user memory.
         */
        switch (size) {
        case 1:
                __uaccess_begin();
                __put_user_asm(*(u8 *)src, (u8 __user *)dst,
                              ret, "b", "b", "iq", 1);
                __uaccess_end();
                return ret;
        case 2:
                __uaccess_begin();
                __put_user_asm(*(u16 *)src, (u16 __user *)dst,
                              ret, "w", "w", "ir", 2);
                __uaccess_end();
                return ret;
        case 4:
                __uaccess_begin();
                __put_user_asm(*(u32 *)src, (u32 __user *)dst,
                              ret, "l", "k", "ir", 4);
                __uaccess_end();
                return ret;
        case 8:
                __uaccess_begin();
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 8);
                __uaccess_end();
                return ret;
        case 10:
                __uaccess_begin();
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 10);
                if (likely(!ret)) {
                        /*
                         * Compiler barrier between the two stores
                         * (__put_user_asm() has no "memory" clobber).
                         */
                        asm("":::"memory");
                        __put_user_asm(((u16 *)src)[4], 4 + (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                }
                __uaccess_end();
                return ret;
        case 16:
                __uaccess_begin();
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 16);
                if (likely(!ret)) {
                        /* Compiler barrier, as in the 10-byte case above. */
                        asm("":::"memory");
                        __put_user_asm(((u64 *)src)[1], 1 + (u64 __user *)dst,
                                       ret, "q", "", "er", 8);
                }
                __uaccess_end();
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}

static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
        return copy_user_generic((__force void *)dst,
                                 (__force void *)src, size);
}
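
/*
 * raw_copy_in_user() copies directly between two userspace buffers,
 * e.g. for compat syscalls that shuffle user data; both pointers must
 * already have passed access_ok() in the caller.
 */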

extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
                           size_t len);
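
/*
 * Both helpers use non-temporal stores: __copy_user_nocache() simply
 * keeps the copied data from polluting the CPU caches, while
 * __copy_user_flushcache() additionally ensures the destination lines
 * are flushed out of the cache hierarchy, as required for writes to
 * persistent memory.  The only caller below passes zerorest=0, i.e.
 * the destination tail is left untouched if the copy faults part-way.
 */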

/* The asm helpers are uninstrumented, so report the write to KASAN by hand. */
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
                                  unsigned size)
{
        kasan_check_write(dst, size);
        return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
        kasan_check_write(dst, size);
        return __copy_user_flushcache(dst, src, size);
}

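/*
 * Exception fixup for __memcpy_mcsafe(): invoked when a machine check
 * (or fault) interrupts the fast copy; it retries the remainder byte
 * by byte and returns the number of bytes not copied.
 */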
unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */