/* arch/x86/include/asm/uaccess_64.h */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
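
/*
 * Illustrative sketch only, not part of this header: the comment above
 * describes how the copy routine is selected, and the hypothetical helper
 * uaccess_example_dispatch() below spells the same policy out as ordinary
 * branches.  The real copy_user_generic() never takes these branches; its
 * call site is patched once at boot by alternative_call_2().  boot_cpu_has()
 * is the usual CPU feature test and is assumed to be reachable here (it
 * lives in <asm/cpufeature.h>).
 */
static __always_inline unsigned long
uaccess_example_dispatch(void *to, const void *from, unsigned len)
{
	if (boot_cpu_has(X86_FEATURE_ERMS))
		return copy_user_enhanced_fast_string(to, from, len);
	if (boot_cpu_has(X86_FEATURE_REP_GOOD))
		return copy_user_generic_string(to, from, len);
	return copy_user_generic_unrolled(to, from, len);
}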

__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	check_object_size(dst, size, false);
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm_nozero(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm_nozero(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
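
/*
 * Illustrative sketch only: because __copy_from_user_nocheck() is
 * __always_inline and tests __builtin_constant_p(size), a call with a
 * compile-time constant size such as sizeof(u32) collapses to a single
 * __get_user_asm_nozero() between __uaccess_begin()/__uaccess_end(); any
 * other size falls through to copy_user_generic().  The helper name
 * uaccess_example_fetch_u32() is hypothetical.
 */
static __always_inline int
uaccess_example_fetch_u32(u32 *dst, const u32 __user *src)
{
	/* size is the constant 4, so only the "case 4" path above is emitted */
	return __copy_from_user_nocheck(dst, src, sizeof(*dst));
}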

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}

static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	check_object_size(src, size, true);
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	}
	case 2: {
		u16 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	}

	case 4: {
		u32 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	}
	case 8: {
		u64 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
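
/*
 * Hedged sketch with a hypothetical name: __copy_in_user() copies between
 * two *user* buffers.  For the constant sizes handled above it bounces the
 * value through a kernel temporary (tmp) using a __get_user_asm()/
 * __put_user_asm() pair, so a fault on either side shows up in ret.
 */
static __always_inline int
uaccess_example_dup_u64(u64 __user *dst, const u64 __user *src)
{
	/* the constant sizeof(u64) selects the "case 8" bounce path above */
	return __copy_in_user(dst, src, sizeof(u64));
}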

static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}
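
/*
 * Illustrative sketch only: the _nocache variant goes through
 * __copy_user_nocache(), which uses non-temporal stores so that a large,
 * read-once copy does not evict useful data from the CPU caches.  Passing
 * zerorest as 0, as the inline above does, is understood to leave the
 * destination tail untouched after a fault.  uaccess_example_stream_in()
 * is a hypothetical name.
 */
static inline int
uaccess_example_stream_in(void *dst, const void __user *src, unsigned size)
{
	/* same call the inline above makes, with the arguments spelled out */
	return __copy_user_nocache(dst, src, size, /* zerorest */ 0);
}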

unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */