/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

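/*
 * These three variants are implemented in assembly (historically in
 * arch/x86/lib/copy_user_64.S); copy_user_generic() below selects one of
 * them at boot time via alternatives patching.
 */
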
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
        unsigned ret;

        /*
         * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
         * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
         * Otherwise, use copy_user_generic_unrolled.
         */
        alternative_call_2(copy_user_generic_unrolled,
                         copy_user_generic_string,
                         X86_FEATURE_REP_GOOD,
                         copy_user_enhanced_fast_string,
                         X86_FEATURE_ERMS,
                         ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
                                     "=d" (len)),
                         "1" (to), "2" (from), "3" (len)
                         : "memory", "rcx", "r8", "r9", "r10", "r11");
        return ret;
}

static __always_inline __must_check unsigned long
copy_to_user_mcsafe(void *to, const void *from, unsigned len)
{
        unsigned long ret;

        __uaccess_begin();
        ret = memcpy_mcsafe(to, from, len);
        __uaccess_end();
        return ret;
}

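/*
 * Note (illustrative, not from the original header): memcpy_mcsafe() can
 * recover from a machine-check exception while reading the source, so a
 * poisoned (e.g. pmem) page shows up as a short copy rather than a crash.
 * A caller typically checks the residue, e.g.:
 *
 *	unsigned long rem = copy_to_user_mcsafe(dst, src, len);
 *	if (rem)
 *		return len - rem;	// bytes actually copied
 */
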
static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
        int ret = 0;

        if (!__builtin_constant_p(size))
                return copy_user_generic(dst, (__force void *)src, size);
        switch (size) {
        case 1:
                __uaccess_begin_nospec();
                __get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
                              ret, "b", "b", "=q", 1);
                __uaccess_end();
                return ret;
        case 2:
                __uaccess_begin_nospec();
                __get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
                              ret, "w", "w", "=r", 2);
                __uaccess_end();
                return ret;
        case 4:
                __uaccess_begin_nospec();
                __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
                              ret, "l", "k", "=r", 4);
                __uaccess_end();
                return ret;
        case 8:
                __uaccess_begin_nospec();
                __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
                              ret, "q", "", "=r", 8);
                __uaccess_end();
                return ret;
        case 10:
                __uaccess_begin_nospec();
                __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 10);
                if (likely(!ret))
                        __get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
                                       (u16 __user *)(8 + (char __user *)src),
                                       ret, "w", "w", "=r", 2);
                __uaccess_end();
                return ret;
        case 16:
                __uaccess_begin_nospec();
                __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
                               ret, "q", "", "=r", 16);
                if (likely(!ret))
                        __get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
                                       (u64 __user *)(8 + (char __user *)src),
                                       ret, "q", "", "=r", 8);
                __uaccess_end();
                return ret;
        default:
                return copy_user_generic(dst, (__force void *)src, size);
        }
}

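/*
 * Illustrative example (not part of this header): callers normally reach
 * raw_copy_from_user() through copy_from_user(), which does the
 * access_ok() check first:
 *
 *	struct foo kbuf;			// hypothetical struct
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *
 * With a compile-time-constant size the switch above inlines a fixed-size
 * __get_user_asm_nozero() sequence instead of calling copy_user_generic().
 */
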
static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
        int ret = 0;

        if (!__builtin_constant_p(size))
                return copy_user_generic((__force void *)dst, src, size);
        switch (size) {
        case 1:
                __uaccess_begin();
                __put_user_asm(*(u8 *)src, (u8 __user *)dst,
                              ret, "b", "b", "iq", 1);
                __uaccess_end();
                return ret;
        case 2:
                __uaccess_begin();
                __put_user_asm(*(u16 *)src, (u16 __user *)dst,
                              ret, "w", "w", "ir", 2);
                __uaccess_end();
                return ret;
        case 4:
                __uaccess_begin();
                __put_user_asm(*(u32 *)src, (u32 __user *)dst,
                              ret, "l", "k", "ir", 4);
                __uaccess_end();
                return ret;
        case 8:
                __uaccess_begin();
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                              ret, "q", "", "er", 8);
                __uaccess_end();
                return ret;
        case 10:
                __uaccess_begin();
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 10);
                if (likely(!ret)) {
                        asm("":::"memory");
                        __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
                                       ret, "w", "w", "ir", 2);
                }
                __uaccess_end();
                return ret;
        case 16:
                __uaccess_begin();
                __put_user_asm(*(u64 *)src, (u64 __user *)dst,
                               ret, "q", "", "er", 16);
                if (likely(!ret)) {
                        asm("":::"memory");
                        __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
                                       ret, "q", "", "er", 8);
                }
                __uaccess_end();
                return ret;
        default:
                return copy_user_generic((__force void *)dst, src, size);
        }
}

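/*
 * Illustrative example (not part of this header): the mirror image of the
 * above, reached via copy_to_user() after its access_ok() check:
 *
 *	if (copy_to_user(ubuf, &kbuf, sizeof(kbuf)))
 *		return -EFAULT;
 *
 * Constant sizes of 1/2/4/8/10/16 bytes use the inlined __put_user_asm()
 * cases; everything else falls back to copy_user_generic().
 */
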
static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
        return copy_user_generic((__force void *)dst,
                                 (__force void *)src, size);
}

extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
                                   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
                                  unsigned size)
{
        kasan_check_write(dst, size);
        return __copy_user_nocache(dst, src, size, 0);
}

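/*
 * Note (not from the original header): the "nocache" variant uses
 * non-temporal stores so a large copy does not evict useful data from the
 * CPU caches; the final 0 (zerorest) argument means the destination tail
 * is left as-is rather than zeroed if the user-space read faults part way
 * through.
 */
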
static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
        kasan_check_write(dst, size);
        return __copy_user_flushcache(dst, src, size);
}

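/*
 * Note (not from the original header): the "flushcache" variant is used by
 * persistent-memory (pmem/DAX) code; it ensures the written destination
 * range is flushed from (or bypasses) the CPU caches so the data is
 * durable across a power loss.
 */
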
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */