/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT	__ua_limit

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_64BIT */
/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
#define USER_DS		((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ    0
#define VERIFY_WRITE   1
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)
/*
 * eva_kernel_access() - determine whether kernel memory access on an EVA system
 *
 * Determines whether memory accesses should be performed to kernel memory
 * on a system using Extended Virtual Addressing (EVA).
 *
 * Return: true if a kernel memory access on an EVA system, else false.
 */
static inline bool eva_kernel_access(void)
{
	if (!IS_ENABLED(CONFIG_EVA))
		return false;

	return segment_eq(get_fs(), get_ds());
}
/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *	  %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *	  to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)					\
({									\
	unsigned long __addr = (unsigned long) (addr);			\
	unsigned long __size = size;					\
	unsigned long __mask = mask;					\
	unsigned long __ok;						\
									\
	__chk_user_ptr(addr);						\
	__ok = (signed long)(__mask & (__addr | (__addr + __size) |	\
		__ua_size(__size)));					\
	__ok == 0;							\
})

#define access_ok(type, addr, size)					\
	likely(__access_ok((addr), (size), __access_mask))
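
/*
 * Example: a typical explicit range check before a batched transfer.
 * Illustrative sketch only; the handler and buffer names are
 * hypothetical, not part of this header:
 *
 *	static int foo_check_args(void __user *ubuf, unsigned long len)
 *	{
 *		if (!access_ok(VERIFY_WRITE, ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */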
/*
 * put_user: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))
/*
 * get_user: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
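
/*
 * Example: round-tripping a scalar with the checking variants.
 * Illustrative sketch only; the function and variable names are
 * hypothetical:
 *
 *	static int foo_double(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		return put_user(val * 2, uptr);
 *	}
 */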
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
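
/*
 * Example: one access_ok() check amortized over several __get_user()
 * calls. Illustrative sketch only; names are hypothetical:
 *
 *	static int foo_sum3(int __user *uptr, int *out)
 *	{
 *		int a, b, c;
 *
 *		if (!access_ok(VERIFY_READ, uptr, 3 * sizeof(int)))
 *			return -EFAULT;
 *		if (__get_user(a, uptr) || __get_user(b, uptr + 1) ||
 *		    __get_user(c, uptr + 2))
 *			return -EFAULT;
 *		*out = a + b + c;
 *		return 0;
 *	}
 */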
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel specific functions for EVA. We need to use normal load instructions
 * to read data from kernel when operating in EVA mode. We use these macros to
 * avoid redefining __get_user_asm for EVA.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd			_loadw
#else
#define _loadd(reg, addr)	"ld " reg ", " addr
#endif
#define _loadw(reg, addr)	"lw " reg ", " addr
#define _loadh(reg, addr)	"lh " reg ", " addr
#define _loadb(reg, addr)	"lb " reg ", " addr

#define __get_kernel_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, _loadb, ptr); break;		\
	case 2: __get_data_asm(val, _loadh, ptr); break;		\
	case 4: __get_data_asm(val, _loadw, ptr); break;		\
	case 8: __GET_DW(val, _loadd, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif
extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)				\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, user_lb, ptr); break;		\
	case 2: __get_data_asm(val, user_lh, ptr); break;		\
	case 4: __get_data_asm(val, user_lw, ptr); break;		\
	case 8: __GET_DW(val, user_ld, ptr); break;			\
	default: __get_user_unknown(); break;				\
	}								\
} while (0)

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
									\
	if (eva_kernel_access()) {					\
		__get_kernel_common((x), size, ptr);			\
	} else {							\
		__chk_user_ptr(ptr);					\
		__get_user_common((x), size, ptr);			\
	}								\
	__gu_err;							\
})
#define __get_user_check(x, ptr, size)					\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) {		\
		if (eva_kernel_access())				\
			__get_kernel_common((x), size, __gu_ptr);	\
		else							\
			__get_user_common((x), size, __gu_ptr);		\
	} else								\
		(x) = 0;						\
									\
	__gu_err;							\
})
#define __get_data_asm(val, insn, addr)					\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	"insn("%1", "%3")"				\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)				\
{									\
	union {								\
		unsigned long long	l;				\
		__typeof__(*(addr))	t;				\
	} __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn("%1", "(%3)")"				\n"	\
	"2:	" insn("%D1", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
									\
	(val) = __gu_tmp.t;						\
}
#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel specific functions for EVA. We need to use normal store instructions
 * to write data to kernel when operating in EVA mode. We use these macros to
 * avoid redefining __put_data_asm for EVA.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored			_storew
#else
#define _stored(reg, addr)	"sd " reg ", " addr
#endif

#define _storew(reg, addr)	"sw " reg ", " addr
#define _storeh(reg, addr)	"sh " reg ", " addr
#define _storeb(reg, addr)	"sb " reg ", " addr

#define __put_kernel_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(_storeb, ptr); break;			\
	case 2: __put_data_asm(_storeh, ptr); break;			\
	case 4: __put_data_asm(_storew, ptr); break;			\
	case 8: __PUT_DW(_stored, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)
#endif
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size)					\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(user_sb, ptr); break;			\
	case 2: __put_data_asm(user_sh, ptr); break;			\
	case 4: __put_data_asm(user_sw, ptr); break;			\
	case 8: __PUT_DW(user_sd, ptr); break;				\
	default: __put_user_unknown(); break;				\
	}								\
} while (0)
#define __put_user_nocheck(x, ptr, size)				\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	if (eva_kernel_access()) {					\
		__put_kernel_common(ptr, size);				\
	} else {							\
		__chk_user_ptr(ptr);					\
		__put_user_common(ptr, size);				\
	}								\
	__pu_err;							\
})
#define __put_user_check(x, ptr, size)					\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	might_fault();							\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {		\
		if (eva_kernel_access())				\
			__put_kernel_common(__pu_addr, size);		\
		else							\
			__put_user_common(__pu_addr, size);		\
	}								\
									\
	__pu_err;							\
})
#define __put_data_asm(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}
#define __put_data_asm_ll32(insn, ptr)					\
{									\
	__asm__ __volatile__(						\
	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
	"2:	"insn("%D2", "4(%3)")"				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unknown(void);
/*
 * ul{b,h,w} are macros and there are no equivalent macros for EVA.
 * EVA unaligned access is handled in the ADE exception handler.
 */
#ifndef CONFIG_EVA
/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr) \
	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr) \
	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
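
/*
 * Example: fetching a 32-bit quantity that may not be naturally
 * aligned, e.g. a member of a packed structure supplied by userspace.
 * Illustrative sketch only; names are hypothetical:
 *
 *	static int foo_read_packed(u32 __user *uptr, u32 *out)
 *	{
 *		return get_user_unaligned(*out, uptr);
 *	}
 */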
/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:	 Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr) \
	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:	 Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)				\
	__get_data_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr)			\
do {									\
	switch (size) {							\
	case 1: __get_data_asm(val, user_lb, ptr); break;		\
	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;	\
	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;	\
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
	default: __get_user_unaligned_unknown(); break;			\
	}								\
} while (0)
#define __get_user_unaligned_nocheck(x,ptr,size)			\
({									\
	int __gu_err;							\
									\
	__get_user_unaligned_common((x), size, ptr);			\
	__gu_err;							\
})

#define __get_user_unaligned_check(x,ptr,size)				\
({									\
	int __gu_err = -EFAULT;						\
	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
									\
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))		\
		__get_user_unaligned_common((x), size, __gu_ptr);	\
									\
	__gu_err;							\
})
#define __get_data_unaligned_asm(val, insn, addr)			\
{									\
	long __gu_tmp;							\
									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_tmp)				\
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
									\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr)			\
{									\
	unsigned long long __gu_tmp;					\
									\
	__asm__ __volatile__(						\
	"1:	ulw	%1, (%3)				\n"	\
	"2:	ulw	%D1, 4(%3)				\n"	\
	"	move	%0, $0					\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
	: "0" (0), "r" (addr), "i" (-EFAULT));				\
	(val) = (__typeof__(*(addr))) __gu_tmp;				\
}
/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_common(ptr, size)				\
do {									\
	switch (size) {							\
	case 1: __put_data_asm(user_sb, ptr); break;			\
	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
	default: __put_user_unaligned_unknown(); break;			\
	}								\
} while (0)
#define __put_user_unaligned_nocheck(x,ptr,size)			\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	int __pu_err = 0;						\
									\
	__pu_val = (x);							\
	__put_user_unaligned_common(ptr, size);				\
	__pu_err;							\
})

#define __put_user_unaligned_check(x,ptr,size)				\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size)))		\
		__put_user_unaligned_common(__pu_addr, size);		\
									\
	__pu_err;							\
})
#define __put_user_unaligned_asm(insn, ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3	# __put_user_unaligned_asm \n"	\
	"2:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
	  "i" (-EFAULT));						\
}
#define __put_user_unaligned_asm_ll32(ptr)				\
{									\
	__asm__ __volatile__(						\
	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32 \n" \
	"2:	sw	%D2, 4(%3)				\n"	\
	"3:							\n"	\
	"	.insn						\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %4					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (0), "r" (__pu_val), "r" (ptr),				\
	  "i" (-EFAULT));						\
}

extern void __put_user_unaligned_unknown(void);

#endif
/*
 * We're generating jump to subroutines which will be outside the range of
 * jump instructions
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
					      defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif
extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#ifndef CONFIG_EVA
#define __invoke_copy_to_user(to, from, n)				\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(__copy_user)					\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to_user(to, from, n)

#endif
/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:	  Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
									\
	check_object_size(__cu_from, __cu_len, true);			\
	might_fault();							\
									\
	if (eva_kernel_access())					\
		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
						   __cu_len);		\
	else								\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
						 __cu_len);		\
	__cu_len;							\
})
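
/*
 * Example: pushing a kernel structure to a buffer that was already
 * validated with access_ok(). Illustrative sketch only; names are
 * hypothetical:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(info)))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, &info, sizeof(info)))
 *		return -EFAULT;
 */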
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n)				\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
									\
	check_object_size(__cu_from, __cu_len, true);			\
									\
	if (eva_kernel_access())					\
		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
						   __cu_len);		\
	else								\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
						 __cu_len);		\
	__cu_len;							\
})
#define __copy_from_user_inatomic(to, from, n)				\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
									\
	check_object_size(__cu_to, __cu_len, false);			\
									\
	if (eva_kernel_access())					\
		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
							      __cu_from,\
							      __cu_len);\
	else								\
		__cu_len = __invoke_copy_from_user_inatomic(__cu_to,	\
							    __cu_from,	\
							    __cu_len);	\
	__cu_len;							\
})
/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:	  Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
									\
	check_object_size(__cu_from, __cu_len, true);			\
									\
	if (eva_kernel_access()) {					\
		__cu_len = __invoke_copy_to_kernel(__cu_to,		\
						   __cu_from,		\
						   __cu_len);		\
	} else {							\
		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {	\
			might_fault();					\
			__cu_len = __invoke_copy_to_user(__cu_to,	\
							 __cu_from,	\
							 __cu_len);	\
		}							\
	}								\
	__cu_len;							\
})
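
/*
 * Example: the common read(2)-style pattern; a nonzero return means
 * the tail of the buffer was not writable. Illustrative sketch only;
 * names are hypothetical:
 *
 *	if (copy_to_user(buf, foo_data, count))
 *		return -EFAULT;
 *	return count;
 */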
#ifndef CONFIG_EVA

#define __invoke_copy_from_user(to, from, n)				\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user)					\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from_user(to, from, n)

/* For userland <-> userland operations */
#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from_user(to, from, n)

/* For kernel <-> kernel operations */
#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from_user(to, from, n)
#define __invoke_copy_from_user_inatomic(to, from, n)			\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user_inatomic)				\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})

#define __invoke_copy_from_kernel_inatomic(to, from, n)			\
	__invoke_copy_from_user_inatomic(to, from, n)
#else

/* EVA specific functions */

extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
				       size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from,
				   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
				 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)	\
({									\
	register void *__cu_to_r __asm__("$4");				\
	register const void __user *__cu_from_r __asm__("$5");		\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(func_ptr)						\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)	\
({									\
	register void __user *__cu_to_r __asm__("$4");			\
	register const void *__cu_from_r __asm__("$5");			\
	register long __cu_len_r __asm__("$6");				\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(func_ptr)						\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
	  DADDI_SCRATCH, "memory");					\
	__cu_len_r;							\
})
/*
 * Source or destination address is in userland. We need to go through
 * the TLB
 */
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)

#define __invoke_copy_from_user_inatomic(to, from, n)			\
	__invoke_copy_from_user_eva_generic(to, from, n,		\
					    __copy_user_inatomic_eva)

#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)

#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)

/*
 * Source or destination address in the kernel. We are not going through
 * the TLB
 */
#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#define __invoke_copy_from_kernel_inatomic(to, from, n)			\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)

#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#endif /* CONFIG_EVA */
/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:	  Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
									\
	check_object_size(__cu_to, __cu_len, false);			\
									\
	if (eva_kernel_access()) {					\
		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
						     __cu_from,		\
						     __cu_len);		\
	} else {							\
		might_fault();						\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);		\
	}								\
	__cu_len;							\
})
/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:	  Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:	  Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
									\
	check_object_size(__cu_to, __cu_len, false);			\
									\
	if (eva_kernel_access()) {					\
		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
						     __cu_from,		\
						     __cu_len);		\
	} else {							\
		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {	\
			might_fault();					\
			__cu_len = __invoke_copy_from_user(__cu_to,	\
							   __cu_from,	\
							   __cu_len);	\
		} else {						\
			memset(__cu_to, 0, __cu_len);			\
		}							\
	}								\
	__cu_len;							\
})
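
/*
 * Example: the usual write(2)-style ingest of a request structure;
 * note that no prior access_ok() is needed here. Illustrative sketch
 * only; names are hypothetical:
 *
 *	struct foo_req req;
 *
 *	if (copy_from_user(&req, ubuf, sizeof(req)))
 *		return -EFAULT;
 */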
#define __copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (eva_kernel_access()) {					\
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
						    __cu_len);		\
	} else {							\
		might_fault();						\
		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,	\
						  __cu_len);		\
	}								\
	__cu_len;							\
})

#define copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (eva_kernel_access()) {					\
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
						    __cu_len);		\
	} else {							\
		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
			might_fault();					\
			__cu_len = ___invoke_copy_in_user(__cu_to,	\
							  __cu_from,	\
							  __cu_len);	\
		}							\
	}								\
	__cu_len;							\
})
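
/*
 * Example: moving bytes between two userspace buffers without a
 * kernel bounce buffer, as compat-syscall glue sometimes does.
 * Illustrative sketch only; names are hypothetical:
 *
 *	if (copy_in_user(dst_ubuf, src_ubuf, len))
 *		return -EFAULT;
 */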
/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:	  Destination address, in user space.
 * @n:	  Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero_kernel)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	}

	return res;
}
#define clear_user(addr,n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
					__cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})
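
/*
 * Example: zero-filling the unwritten tail of a user buffer.
 * Illustrative sketch only; names are hypothetical:
 *
 *	if (clear_user(ubuf + written, count - written))
 *		return -EFAULT;
 */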
/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}
/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *	   least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}
/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	}

	return res;
}
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}
/*
 * strnlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}
#endif /* _ASM_UACCESS_H */