/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}
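
/*
 * Illustrative sketch only (not an interface defined in this header): the
 * historical idiom built on get_fs()/set_fs() for temporarily lifting the
 * address-space limit so the *_user() helpers accept kernel pointers.
 * Assumes a normal kernel .c file including <linux/uaccess.h>;
 * "do_something_user" is a hypothetical helper.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = do_something_user((void __user *)kernel_ptr);
 *	set_fs(old_fs);
 */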

#define segment_eq(a, b)	((a).seg == (b).seg)
#define user_addr_max()	(current->thread.addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit) \
({ \
	__chk_user_ptr(addr); \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
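
/*
 * Worked example of the overflow concern above (illustrative numbers):
 * with addr == ULONG_MAX - 4 and size == 64, "addr + size" wraps around
 * to a small value that would pass a naive "addr + size > limit" test.
 * The non-constant path therefore re-checks "addr < size" after the
 * addition to catch the wrap, while the constant-size path avoids the
 * wrap entirely by comparing addr against "limit - size".
 */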

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ() \
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size) \
({ \
	WARN_ON_IN_IRQ(); \
	likely(!__range_not_ok(addr, size, user_addr_max())); \
})
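
/*
 * Usage sketch (illustrative, hypothetical buffer names in a driver or
 * syscall .c file): access_ok() only range-checks the pointer, so the
 * subsequent copy can still fault and must be checked as well.
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */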

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec() \
({ \
	stac(); \
	barrier_nospec(); \
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__( \
	__typefits(x,char, \
	  __typefits(x,short, \
	    __typefits(x,int, \
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
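
/*
 * For example (illustrative): __inttype(*(u16 __user *)p) selects
 * "unsigned short", while on 32-bit __inttype(*(u64 __user *)p) falls
 * through every __typefits() test and yields "unsigned long long", so the
 * temporary used by get_user() is always wide enough for the access.
 */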

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr) \
({ \
	int __ret_gu; \
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
	__chk_user_ptr(ptr); \
	might_fault(); \
	asm volatile("call __get_user_%P4" \
		     : "=a" (__ret_gu), "=r" (__val_gu), \
		       ASM_CALL_CONSTRAINT \
		     : "0" (ptr), "i" (sizeof(*(ptr)))); \
	(x) = (__force __typeof__(*(ptr))) __val_gu; \
	__builtin_expect(__ret_gu, 0); \
})
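
/*
 * Usage sketch (illustrative, hypothetical names): reading one scalar
 * from user space in a syscall or ioctl path.  The macro evaluates to 0
 * or -EFAULT; on fault the destination is zeroed.
 *
 *	int __user *uaddr;
 *	int val;
 *
 *	if (get_user(val, uaddr))
 *		return -EFAULT;
 */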

#define __put_user_x(size, x, ptr, __ret_pu) \
	asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label) \
	asm_volatile_goto("\n" \
		     "1: movl %%eax,0(%1)\n" \
		     "2: movl %%edx,4(%1)\n" \
		     _ASM_EXTABLE_UA(1b, %l2) \
		     _ASM_EXTABLE_UA(2b, %l2) \
		     : : "A" (x), "r" (addr) \
		     : : label)

#define __put_user_x8(x, ptr, __ret_pu) \
	asm volatile("call __put_user_8" : "=a" (__ret_pu) \
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax.  Clobbers %ebx
 * (%rbx on 64-bit).
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) \
({ \
	int __ret_pu; \
	__typeof__(*(ptr)) __pu_val; \
	__chk_user_ptr(ptr); \
	might_fault(); \
	__pu_val = x; \
	switch (sizeof(*(ptr))) { \
	case 1: \
		__put_user_x(1, __pu_val, ptr, __ret_pu); \
		break; \
	case 2: \
		__put_user_x(2, __pu_val, ptr, __ret_pu); \
		break; \
	case 4: \
		__put_user_x(4, __pu_val, ptr, __ret_pu); \
		break; \
	case 8: \
		__put_user_x8(__pu_val, ptr, __ret_pu); \
		break; \
	default: \
		__put_user_x(X, __pu_val, ptr, __ret_pu); \
		break; \
	} \
	__builtin_expect(__ret_pu, 0); \
})
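
/*
 * Usage sketch (illustrative, hypothetical names): writing a single
 * scalar back to user space; the switch above picks the matching
 * __put_user_N() call from sizeof(*(ptr)).
 *
 *	u64 __user *uresult;
 *
 *	if (put_user(computed_value, uresult))
 *		return -EFAULT;
 */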

#define __put_user_size(x, ptr, size, label) \
do { \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: \
		__put_user_goto(x, ptr, "b", "iq", label); \
		break; \
	case 2: \
		__put_user_goto(x, ptr, "w", "ir", label); \
		break; \
	case 4: \
		__put_user_goto(x, ptr, "l", "ir", label); \
		break; \
	case 8: \
		__put_user_goto_u64(x, ptr, label); \
		break; \
	default: \
		__put_user_bad(); \
	} \
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval) \
({ \
	__typeof__(ptr) __ptr = (ptr); \
	asm volatile("\n" \
		     "1: movl %[lowbits],%%eax\n" \
		     "2: movl %[highbits],%%edx\n" \
		     "3:\n" \
		     ".section .fixup,\"ax\"\n" \
		     "4: mov %[efault],%[errout]\n" \
		     "   xorl %%eax,%%eax\n" \
		     "   xorl %%edx,%%edx\n" \
		     "   jmp 3b\n" \
		     ".previous\n" \
		     _ASM_EXTABLE_UA(1b, 4b) \
		     _ASM_EXTABLE_UA(2b, 4b) \
		     : [errout] "=r" (retval), \
		       [output] "=&A"(x) \
		     : [lowbits] "m" (__m(__ptr)), \
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1), \
		       [efault] "i" (-EFAULT), "0" (retval)); \
})

#else
#define __get_user_asm_u64(x, ptr, retval) \
	__get_user_asm(x, ptr, retval, "q", "=r")
#endif

#define __get_user_size(x, ptr, size, retval) \
do { \
	unsigned char x_u8__; \
 \
	retval = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: \
		__get_user_asm(x_u8__, ptr, retval, "b", "=q"); \
		(x) = x_u8__; \
		break; \
	case 2: \
		__get_user_asm(x, ptr, retval, "w", "=r"); \
		break; \
	case 4: \
		__get_user_asm(x, ptr, retval, "l", "=r"); \
		break; \
	case 8: \
		__get_user_asm_u64(x, ptr, retval); \
		break; \
	default: \
		(x) = __get_user_bad(); \
	} \
} while (0)

#define __get_user_asm(x, addr, err, itype, ltype) \
	asm volatile("\n" \
		     "1: mov"itype" %[umem],%[output]\n" \
		     "2:\n" \
		     ".section .fixup,\"ax\"\n" \
		     "3: mov %[efault],%[errout]\n" \
		     "   xor"itype" %[output],%[output]\n" \
		     "   jmp 2b\n" \
		     ".previous\n" \
		     _ASM_EXTABLE_UA(1b, 3b) \
		     : [errout] "=r" (err), \
		       [output] ltype(x) \
		     : [umem] "m" (__m(addr)), \
		       [efault] "i" (-EFAULT), "0" (err))

#define __put_user_nocheck(x, ptr, size) \
({ \
	__label__ __pu_label; \
	int __pu_err = -EFAULT; \
	__typeof__(*(ptr)) __pu_val = (x); \
	__typeof__(ptr) __pu_ptr = (ptr); \
	__typeof__(size) __pu_size = (size); \
	__uaccess_begin(); \
	__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label); \
	__pu_err = 0; \
__pu_label: \
	__uaccess_end(); \
	__builtin_expect(__pu_err, 0); \
})

#define __get_user_nocheck(x, ptr, size) \
({ \
	int __gu_err; \
	__inttype(*(ptr)) __gu_val; \
	__typeof__(ptr) __gu_ptr = (ptr); \
	__typeof__(size) __gu_size = (size); \
	__uaccess_begin_nospec(); \
	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err); \
	__uaccess_end(); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	__builtin_expect(__gu_err, 0); \
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label) \
	asm_volatile_goto("\n" \
		"1: mov"itype" %0,%1\n" \
		_ASM_EXTABLE_UA(1b, %l2) \
		: : ltype(x), "m" (__m(addr)) \
		: : label)

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
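
/*
 * Usage sketch for the "__" variants (illustrative, hypothetical names):
 * one access_ok() check followed by several unchecked accesses to the
 * same user buffer.
 *
 *	u32 __user *uarg;
 *	u32 lo, hi;
 *
 *	if (!access_ok(uarg, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	if (__get_user(lo, &uarg[0]) || __get_user(hi, &uarg[1]))
 *		return -EFAULT;
 *	if (__put_user(lo + hi, &uarg[0]))
 *		return -EFAULT;
 */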

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a, b)	user_access_begin(a, b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label) \
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label) \
do { \
	int __gu_err; \
	__inttype(*(ptr)) __gu_val; \
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	if (unlikely(__gu_err)) goto err_label; \
} while (0)
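
/*
 * Usage sketch (illustrative, hypothetical names): the unsafe accessors
 * must be bracketed by user_access_begin()/user_access_end(), and they
 * report faults by jumping to the supplied label rather than returning
 * an error code.
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(val, uptr, Efault);
 *	unsafe_put_user(val + 1, uptr, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */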

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label) \
	while (len >= sizeof(type)) { \
		unsafe_put_user(*(type *)(src), (type __user *)(dst), label); \
		dst += sizeof(type); \
		src += sizeof(type); \
		len -= sizeof(type); \
	}

#define unsafe_copy_to_user(_dst, _src, _len, label) \
do { \
	char __user *__ucu_dst = (_dst); \
	const char *__ucu_src = (_src); \
	size_t __ucu_len = (_len); \
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
} while (0)

#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
	int __kr_err; \
 \
	__get_user_size(*((type *)(dst)), (__force type __user *)(src), \
			sizeof(type), __kr_err); \
	if (unlikely(__kr_err)) \
		goto err_label; \
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label) \
	__put_user_size(*((type *)(src)), (__force type __user *)(dst), \
			sizeof(type), err_label)
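
/*
 * These are the arch-level backends used by the generic helpers in
 * <linux/uaccess.h>.  An illustrative caller (hypothetical names), via
 * the generic get_kernel_nofault() wrapper:
 *
 *	unsigned long val;
 *
 *	if (get_kernel_nofault(val, kernel_ptr))
 *		return -EFAULT;
 */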

#endif /* _ASM_X86_UACCESS_H */