#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
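
/*
 * Editorial note: these are the three raw copy routines, implemented in
 * assembly (arch/x86/lib/copy_user_64.S in this tree).  copy_user_generic()
 * below selects one of them at boot via the alternatives mechanism, keyed
 * on CPU features.
 */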

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			   copy_user_generic_string,
			   X86_FEATURE_REP_GOOD,
			   copy_user_enhanced_fast_string,
			   X86_FEATURE_ERMS,
			   ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				       "=d" (len)),
			   "1" (to), "2" (from), "3" (len)
			   : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
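
/*
 * Illustrative sketch (not part of this header): ignoring the register
 * constraints, the alternative_call_2() above behaves roughly like the C
 * below, except that the call target is patched once at boot instead of
 * being chosen on every call:
 *
 *	if (static_cpu_has(X86_FEATURE_ERMS))
 *		ret = copy_user_enhanced_fast_string(to, from, len);
 *	else if (static_cpu_has(X86_FEATURE_REP_GOOD))
 *		ret = copy_user_generic_string(to, from, len);
 *	else
 *		ret = copy_user_generic_unrolled(to, from, len);
 */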

__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	check_object_size(dst, size, false);
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
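
/*
 * Usage sketch (illustrative only; the caller is assumed to have validated
 * the user pointer): for a compile-time-constant size such as sizeof(u64),
 * the switch above collapses to a single inlined __get_user_asm sequence
 * instead of a call to copy_user_generic():
 *
 *	u64 val;
 *	if (__copy_from_user_nocheck(&val, uptr, sizeof(val)))
 *		return -EFAULT;
 */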

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}
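
/*
 * Example use (illustrative; __copy_from_user() does not call access_ok(),
 * so the caller is responsible for it):
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*kbuf)))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, uptr, sizeof(*kbuf)))
 *		return -EFAULT;
 */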

static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	check_object_size(src, size, true);
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
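
/*
 * Editorial note: the empty asm("":::"memory") statements in the 10- and
 * 16-byte cases above are compiler barriers between the two user stores;
 * the intent appears to be to keep the compiler from merging or reordering
 * the second __put_user_asm() across the check of ret from the first.
 */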

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	}
	case 2: {
		u16 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	}

	case 4: {
		u32 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	}
	case 8: {
		u64 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
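
/*
 * Editorial note: __copy_in_user() copies between two user-space buffers,
 * bouncing the data through a kernel temporary (tmp) for the small constant
 * sizes.  Like the other __copy_* helpers here it skips access_ok(), but it
 * may fault and sleep, hence the might_fault().
 */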

static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	kasan_check_read(src, size);
	return __copy_to_user_nocheck(dst, src, size);
}
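
/*
 * Usage sketch (illustrative): the *_inatomic variants omit might_fault(),
 * so they can be used with page faults disabled; the caller deals with a
 * failed copy by retrying in a context that may sleep, e.g.:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, src, len);
 *	pagefault_enable();
 *	if (ret)
 *		... fall back to a path that is allowed to fault ...
 */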

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);
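
/*
 * Editorial note: __copy_user_nocache() uses non-temporal stores on the
 * destination to avoid polluting the cache.  The two wrappers below differ
 * only in whether they may sleep (might_fault()) and in the zerorest flag
 * they pass (1 vs 0), which by its name suggests zeroing the uncopied
 * remainder of the destination when the copy faults part-way through.
 */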

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */