#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr)	\
	((unsigned long __force)(addr) < user_addr_max())
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}
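
/*
 * Worked example (editor's illustration, not part of the original source):
 * on 64-bit, with limit == 0x7ffffffff000,
 *
 *	__chk_range_not_ok(0xfffffffffffffff0, 0x20, limit)
 *
 * computes "addr += size", which wraps around to 0x10; the "addr < size"
 * test then fires and the range is rejected, even though the wrapped sum
 * itself is far below the limit.
 */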
#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
	likely(!__range_not_ok(addr, size, user_addr_max()))
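
/*
 * Example (editor's sketch; mydev_read_regs() and read_reg() are
 * hypothetical): validate a user buffer once, then use the unchecked
 * accessors for the individual stores.
 *
 *	static long mydev_read_regs(u32 __user *ubuf, int count)
 *	{
 *		int i;
 *
 *		if (!access_ok(VERIFY_WRITE, ubuf, count * sizeof(u32)))
 *			return -EFAULT;
 *		for (i = 0; i < count; i++)
 *			if (__put_user(read_reg(i), ubuf + i))
 *				return -EFAULT;
 *		return 0;
 *	}
 */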
/*
 * The exception table consists of triples of addresses relative to the
 * exception table entry itself. The first address is of an instruction
 * that is allowed to fault, the second is the target at which the program
 * should continue. The third is a handler function to deal with the fault
 * caused by the instruction in the first field.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	int insn, fixup, handler;
};
#define ARCH_HAS_RELATIVE_EXTABLE
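
/*
 * Sketch of how a relative entry is resolved (this mirrors the x86
 * extable code and is shown here for illustration only): each field
 * stores an offset from its own location, so the absolute address is
 * recovered by adding the field's address back in.
 *
 *	static inline unsigned long
 *	ex_fixup_addr(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->fixup + x->fixup;
 *	}
 */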
#define swap_ex_entry_fixup(a, b, tmp, delta)			\
	do {							\
		(a)->fixup = (b)->fixup + (delta);		\
		(b)->fixup = (tmp).fixup - (delta);		\
		(a)->handler = (b)->handler + (delta);		\
		(b)->handler = (tmp).handler - (delta);		\
	} while (0)
extern int fixup_exception(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
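
/*
 * For example (editor's note): on 32-bit x86, __inttype(u64) is
 * unsigned long long while __inttype(int) is unsigned long; on 64-bit
 * both resolve to unsigned long.  A compile-time sanity check:
 *
 *	BUILD_BUG_ON(sizeof(__inttype(char)) != sizeof(unsigned long));
 */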
/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	register void *__sp asm(_ASM_SP);				\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)	\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
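
/*
 * Example (editor's sketch; the ioctl and its argument layout are
 * hypothetical): fetching a single int argument from user space.
 * get_user() performs its own range check, so no separate access_ok()
 * call is needed.
 *
 *	static long mydev_set_timeout(unsigned long arg)
 *	{
 *		int timeout;
 *
 *		if (get_user(timeout, (int __user *)arg))
 *			return -EFAULT;
 *		return timeout >= 0 ? 0 : -EINVAL;
 *	}
 */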
#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 4b)				\
		     _ASM_EXTABLE(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif
extern void __put_user_bad(void);
/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
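
/*
 * Example (editor's sketch; mydev_get_status() and mydev_read_status()
 * are hypothetical): returning a status word to user space.
 *
 *	static long mydev_get_status(u32 __user *up)
 *	{
 *		u32 status = mydev_read_status();
 *
 *		return put_user(status, up) ? -EFAULT : 0;
 *	}
 */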
#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)
/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)
#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret) (x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr) (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif
#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))
/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)
#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : ltype(x) : "m" (__m(addr)))
#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__uaccess_begin();					\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})
#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__uaccess_begin();						\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})
/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))
/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current_thread_info()->uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);	\
} while (0)
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
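
/*
 * Example (editor's sketch): one access_ok() check amortized over a loop
 * of __get_user() calls; sum_words() is illustrative, not a kernel API.
 *
 *	static long sum_words(const u32 __user *up, int n, u64 *sum)
 *	{
 *		int i;
 *		u32 v;
 *
 *		if (!access_ok(VERIFY_READ, up, n * sizeof(u32)))
 *			return -EFAULT;
 *		for (i = 0; i < n; i++) {
 *			if (__get_user(v, up + i))
 *				return -EFAULT;
 *			*sum += v;
 *		}
 *		return 0;
 *	}
 */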
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
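
/*
 * Fuller example of the pattern above (editor's sketch, modeled on the
 * signal-frame restore code; the field names are illustrative):
 *
 *	int err = 0;
 *
 *	get_user_try {
 *		get_user_ex(regs->ax, &sc->ax);
 *		get_user_ex(regs->bx, &sc->bx);
 *	} get_user_catch(err);
 *
 *	return err;
 *
 * Every get_user_ex() inside the block shares one __uaccess_begin/end
 * pair and one accumulated error flag, instead of a return-value check
 * per access.
 */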
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
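
/*
 * Example (editor's sketch; uname, ubuf and ublen are hypothetical):
 * pulling in a NUL-terminated name, then zeroing the tail of a user
 * buffer.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 *	if (clear_user(ubuf + len, ublen - len))
 *		return -EFAULT;
 *
 * strncpy_from_user() returns the string length on success, a negative
 * error on a fault, and the full count when no NUL was found.
 */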
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin();						\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})
#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?		\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
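
/*
 * Example (editor's sketch, in the spirit of the futex code; uaddr and
 * tid are hypothetical): atomically claim a user-space lock word if it
 * currently reads 0.
 *
 *	u32 cur;
 *	int ret;
 *
 *	pagefault_disable();
 *	ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, 0, tid);
 *	pagefault_enable();
 *
 * On success ret is 0 and "cur" holds the previous value, so the lock
 * was taken iff cur == 0; on a fault the macro evaluates to -EFAULT.
 */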
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif
unsigned long __must_check _copy_from_user(void *to, const void __user *from,
					   unsigned long n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
					 unsigned long n);
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
# define copy_user_diag __compiletime_warning
#endif

extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");

#undef copy_user_diag
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS

extern void
__compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()

extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()

#else

static inline void
__copy_from_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

#define __copy_to_user_overflow __copy_from_user_overflow

#endif
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();

	/*
	 * While we would like to have the compiler do the checking for us
	 * even in the non-constant size case, any false positives there are
	 * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
	 * without - the [hopefully] dangerous looking nature of the warning
	 * would make people go look at the respective call sites over and
	 * over again just to find that there's no problem).
	 *
	 * And there are cases where it's just not realistic for the compiler
	 * to prove the count to be in range. For example when multiple call
	 * sites of a helper function - perhaps in different source files -
	 * all doing proper range checking, yet the helper function not doing
	 * so again.
	 *
	 * Therefore limit the compile time checking to the constant size
	 * case, and do only runtime checking for non-constant sizes.
	 */
	if (likely(sz < 0 || sz >= n))
		n = _copy_from_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_from_user_overflow();
	else
		__copy_from_user_overflow(sz, n);

	return n;
}
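
/*
 * Example (editor's sketch; struct mydev_req is hypothetical): the
 * common pattern of copying a fixed-size request structure in.
 *
 *	struct mydev_req req;
 *
 *	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
 *		return -EFAULT;
 *
 * Here sizeof(req) is a compile-time constant matching the destination
 * object, so the size check above is decided entirely at compile time.
 */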
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	might_fault();

	/* See the comment in copy_from_user() above. */
	if (likely(sz < 0 || sz >= n))
		n = _copy_to_user(to, from, n);
	else if (__builtin_constant_p(n))
		copy_to_user_overflow();
	else
		__copy_to_user_overflow(sz, n);

	return n;
}
#undef __copy_from_user_overflow
#undef __copy_to_user_overflow
/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic
783 * The "unsafe" user accesses aren't really "unsafe", but the naming
784 * is a big fat warning: you have to not only do the access_ok()
785 * checking before using them, but you have to surround them with the
786 * user_access_begin/end() pair.
788 #define user_access_begin() __uaccess_begin()
789 #define user_access_end() __uaccess_end()
#define unsafe_put_user(x, ptr)						\
({									\
	int __pu_err;							\
	__put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	__builtin_expect(__pu_err, 0);					\
})

#define unsafe_get_user(x, ptr)						\
({									\
	int __gu_err;							\
	unsigned long __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})
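
/*
 * Example (editor's sketch; up, lo and hi are hypothetical): batching
 * two stores under a single __uaccess_begin/end (STAC/CLAC) pair.  The
 * access_ok() check must come first, and user_access_end() must run on
 * every exit path.
 *
 *	if (!access_ok(VERIFY_WRITE, up, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	user_access_begin();
 *	if (unsafe_put_user(lo, up))
 *		goto efault;
 *	if (unsafe_put_user(hi, up + 1))
 *		goto efault;
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */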
#endif /* _ASM_X86_UACCESS_H */