/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;
	/*
	 * Copy n/4 longs with rep movsl, then test the low bits of n
	 * (%b4 is the low byte of operand 4) to see whether a trailing
	 * word and/or byte still needs copying.
	 */
	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
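/*
 * Illustrative sketch (assumption, not part of the original header):
 * __inline_memcpy() is for spots where an out-of-line call is
 * undesirable, e.g. early boot or instrumentation-sensitive paths.
 * A hypothetical caller:
 *
 *	struct boot_params dst;
 *	__inline_memcpy(&dst, real_mode_data, sizeof(dst));
 *
 * real_mode_data is a made-up name here; any pointer/size pair works.
 */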

/*
 * Even with __builtin_, the compiler may still decide to use the
 * out-of-line function.
 */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
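/*
 * Illustrative note (assumption, not part of the original header):
 * with the macro above, a large constant-size copy such as
 *
 *	memcpy(buf, src, 128);		// __len known at compile time
 *
 * dispatches to the out-of-line __memcpy() because 128 >= 64, while
 *
 *	memcpy(buf, src, runtime_len);	// not __builtin_constant_p
 *
 * falls through to __builtin_memcpy() and lets the compiler expand it.
 */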
#endif
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
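/*
 * Illustrative sketch (assumption, not part of the original header):
 * n counts *elements*, not bytes, so filling a 16-entry table with a
 * 32-bit pattern looks like
 *
 *	u32 palette[16];
 *	memset32(palette, 0x00ff00ff, ARRAY_SIZE(palette));
 *
 * memset16()/memset64() behave the same way for 2- and 8-byte patterns.
 */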

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the not-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, -EFAULT for failure.
 */
static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return memcpy_mcsafe_unrolled(dst, src, cnt);
	else
#endif
	memcpy(dst, src, cnt);
	return 0;
}
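/*
 * Illustrative sketch (assumption, not part of the original header):
 * callers are expected to check the return value rather than the copy
 * itself, e.g. when reading from memory that may contain poison:
 *
 *	if (memcpy_mcsafe(buf, pmem_addr, len))
 *		return -EIO;	// machine check consumed, data not copied
 *
 * pmem_addr/len are made-up names; any source that can raise an MCE fits.
 */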

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif
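/*
 * Illustrative sketch (assumption, not part of the original header):
 * memcpy_flushcache() copies and then flushes the destination out of
 * the CPU cache, so a write to persistent memory becomes durable:
 *
 *	memcpy_flushcache(pmem_dst, src, len);
 *
 * pmem_dst/src/len are made-up names for this sketch.
 */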

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */