#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;
	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
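
/*
 * What the asm above does: "rep ; movsl" copies n/4 32-bit words, then
 * bit 1 of n selects a trailing "movsw" (two bytes) and bit 0 a final
 * "movsb" (one byte). For example, n = 11 copies two dwords, then one
 * word, then one byte.
 */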

/* Even with __builtin_ the compiler may decide to use the out-of-line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
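
/*
 * Note on the wrapper above: compile-time-constant lengths of 64 bytes
 * or more go straight to the out-of-line __memcpy(), on the assumption
 * that these old GCC versions expand large constant __builtin_memcpy()
 * calls poorly; every other case is left to __builtin_memcpy(), which
 * the compiler may expand inline.
 */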
#endif
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
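
/*
 * When CONFIG_FORTIFY_SOURCE is set, none of the memcpy() overrides
 * above are installed: the fortified wrappers (see the __NO_FORTIFY
 * comment below) are layered on __builtin_memcpy() and need the plain
 * declaration to stand.
 */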

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the non-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif
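
/*
 * Illustration (kbuild usage, not part of this header): a file opts out
 * of instrumentation in its Makefile, e.g.
 *
 *	KASAN_SANITIZE_slub.o := n
 *
 * so __SANITIZE_ADDRESS__ is not defined for that unit and the defines
 * above route its mem* calls to the uninstrumented __mem* variants.
 */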

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);
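
/*
 * mcsafe_key defaults to false; the MCE init code is expected to flip
 * it on only for CPUs that can actually recover from machine checks,
 * which is what steers the branch in memcpy_mcsafe() below.
 */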

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, -EFAULT for fail.
 */
static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return memcpy_mcsafe_unrolled(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
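
/*
 * Usage sketch (hypothetical caller, not part of this header): copying
 * from persistent memory where consumed poison must not be fatal. The
 * __must_check return value tells the caller a machine check fired:
 *
 *	if (memcpy_mcsafe(buf, pmem_addr, len))
 *		return -EIO;
 */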

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif
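
/*
 * Informational, based on the x86 implementation: memcpy_flushcache()
 * performs the copy with stores that bypass or flush the CPU cache
 * (non-temporal moves), so data written to persistent memory becomes
 * durable without a separate cache-line flush pass.
 */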

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */