/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}
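
/*
 * Usage sketch (illustrative, not part of the original header): the
 * classic (and now discouraged) pattern for letting a kernel buffer
 * pass the user access checks temporarily widens the address limit:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... use the user accessors on a kernel pointer ...
 *	set_fs(old_fs);
 *
 * Forgetting to restore old_fs leaves the whole address space open to
 * the user accessors, which is why TIF_FSCHECK re-verifies the limit
 * on return to user mode.
 */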

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr)	\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}
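
/*
 * Worked example (illustrative only): on 32-bit, with limit = 0xbfffffff,
 * addr = 0xfffffff0 and size = 0x20, a naive "addr + size > limit" test
 * would wrap to 0x10 and wrongly pass.  The constant-size path avoids the
 * wrap by checking addr > limit - size instead, and the variable-size
 * path catches it because the wrapped sum (0x10) is smaller than size.
 */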

#define __range_not_ok(addr, size, limit)			\
({								\
	__chk_user_ptr(addr);					\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)					\
({								\
	WARN_ON_IN_IRQ();					\
	likely(!__range_not_ok(addr, size, user_addr_max()));	\
})
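
/*
 * Usage sketch (illustrative; 'ubuf', 'lo' and 'hi' are hypothetical):
 * validate the whole user range once, then use the unchecked "__"
 * accessors defined below:
 *
 *	u32 lo, hi;
 *
 *	if (!access_ok(ubuf, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	if (__get_user(lo, ubuf) || __get_user(hi, ubuf + 1))
 *		return -EFAULT;
 */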

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to return the second one through a pointer,
 * because that is too much of a performance impact. Thus we have a few
 * rather ugly macros here, and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, which must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
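
/*
 * For example (illustrative): on 64-bit kernels __inttype(u8) and
 * __inttype(u64) are both unsigned long, while on 32-bit kernels
 * __inttype(u64) is unsigned long long, since there sizeof(u64) is
 * larger than sizeof(0UL).
 */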

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
		       ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
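
/*
 * Usage sketch (illustrative; 'uptr' is a hypothetical int __user *):
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *
 * On a faulting access val is zeroed and -EFAULT is returned.
 */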

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#define __put_user_asm_ex_u64(x, addr)				\
	asm volatile("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     "3:"					\
		     _ASM_EXTABLE_EX(1b, 2b)			\
		     _ASM_EXTABLE_EX(2b, 3b)			\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
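
/*
 * Usage sketch (illustrative; 'status' and 'uptr' are hypothetical):
 *
 *	if (put_user(status, uptr))
 *		return -EFAULT;
 *
 * The switch on sizeof(*(ptr)) above means the same macro handles 1-, 2-,
 * 4- and 8-byte values.
 */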

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "k", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "  jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__label__ __pu_label;						\
	int __pu_err = -EFAULT;						\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(ptr) __pu_ptr = (ptr);				\
	__typeof__(size) __pu_size = (size);				\
	__uaccess_begin();						\
	__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);	\
	__pu_err = 0;							\
__pu_label:								\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, rtype, ltype, label)	\
	asm_volatile_goto("\n"						\
		"1:	mov"itype" %"rtype"0,%1\n"			\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

#define __put_user_failed(x, addr, itype, rtype, ltype, errret)		\
	({	__label__ __puflab;					\
		int __pufret = errret;					\
		__put_user_goto(x, addr, itype, rtype, ltype, __puflab); \
		__pufret = 0;						\
	__puflab: __pufret; })

#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret)	do {	\
	retval = __put_user_failed(x, addr, itype, rtype, ltype, errret);	\
} while (0)

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();					\

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
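
/*
 * Combined sketch (illustrative; 'uarg', 'cmd' and 'res' are hypothetical):
 * with one up-front access_ok() check, several fields can be moved with
 * the cheaper unchecked variants:
 *
 *	if (!access_ok(uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	if (__get_user(cmd, &uarg->cmd) || __put_user(res, &uarg->res))
 *		return -EFAULT;
 */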

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok((ptr), sizeof(*(ptr))) ?				\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
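
/*
 * Usage sketch (illustrative; 'uaddr', 'expected' and 'newval' are
 * hypothetical) for a futex-style compare-and-swap on a user word,
 * with pagefaults disabled by the caller:
 *
 *	u32 cur;
 *	int err = user_atomic_cmpxchg_inatomic(&cur, uaddr, expected, newval);
 *
 *	if (err)
 *		return err;
 *	if (cur != expected)
 *		... another thread raced us; retry ...
 */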

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a, b)	user_access_begin(a, b)
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
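
/*
 * Usage sketch (illustrative; 'uptr', 'a' and 'b' are hypothetical): the
 * begin/end pair brackets a whole run of unsafe accesses so SMAP is only
 * toggled once:
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_get_user(a, &uptr[0], efault);
 *	unsafe_get_user(b, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */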

#endif /* _ASM_X86_UACCESS_H */