#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;
	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
/* Even with __builtin_ the compiler may decide to use the out of line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);
#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
#endif
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);
#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the not-instrumented versions of the mem* functions.
 */
#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif
#endif
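
/*
 * Illustrative note (not from this header): instrumentation is disabled
 * per object file in the relevant Makefile, e.g. something along the
 * lines of
 *
 *	KASAN_SANITIZE_slub.o := n
 *
 * so that file is built without __SANITIZE_ADDRESS__ and picks up the
 * __mem* redefinitions in the CONFIG_KASAN block above.
 */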

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return: 0 on success, -EFAULT on failure.
 */
static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return memcpy_mcsafe_unrolled(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
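
/*
 * Illustrative caller (hypothetical, not part of this header): a driver
 * reading from possibly-poisoned persistent memory should treat a nonzero
 * return as "data not copied" rather than assuming success, e.g.
 *
 *	if (memcpy_mcsafe(buf, pmem_addr, len))
 *		return -EIO;
 */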

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */