/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <linux/thread_info.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The exception table consists of pairs of relative offsets: the first
 * is the relative offset to an instruction that is allowed to fault,
 * and the second is the relative offset at which the program should
 * continue. No registers are modified, so it is entirely up to the
 * continuation code to figure out what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

#define ARCH_HAS_RELATIVE_EXTABLE

extern int fixup_exception(struct pt_regs *regs);

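/*
 * Illustrative sketch (not part of the original header): how a relative
 * exception table entry is turned back into an absolute fixup address.
 * The helper name is hypothetical; the real lookup lives in the arch
 * extable code.
 */
static inline unsigned long example_ex_fixup_addr(const struct exception_table_entry *ex)
{
	/* 'fixup' is an offset relative to its own location in the table */
	return (unsigned long)&ex->fixup + ex->fixup;
}
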
#define KERNEL_DS	(-1UL)
#define get_ds()	(KERNEL_DS)

#define USER_DS		TASK_SIZE_64
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}

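/*
 * Illustrative sketch (not part of the original header): the classic
 * pattern for temporarily widening the address limit so that kernel
 * buffers can be passed to routines expecting __user pointers. The
 * function name is hypothetical.
 */
static inline void example_with_kernel_ds(void)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* also flips PSTATE.UAO when available */
	/* ... call code that normally takes __user pointers ... */
	set_fs(old_fs);		/* always restore the previous limit */
}
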
#define segment_eq(a, b)	((a) == (b))

/*
 * Return 1 if addr < current->addr_limit, 0 otherwise.
 */
#define __addr_ok(addr)							\
({									\
	unsigned long flag;						\
	asm("cmp %1, %0; cset %0, lo"					\
		: "=&r" (flag)						\
		: "r" (addr), "0" (current_thread_info()->addr_limit)	\
		: "cc");						\
	flag;								\
})

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= current->addr_limit
 *
 * This needs 65-bit arithmetic.
 */
#define __range_ok(addr, size)						\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
		: "=&r" (flag), "=&r" (roksum)				\
		: "1" (addr), "Ir" (size),				\
		  "r" (current_thread_info()->addr_limit)		\
		: "cc");						\
	flag;								\
})

#define access_ok(type, addr, size)	__range_ok(addr, size)
#define user_addr_max			get_fs

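/*
 * Illustrative sketch (not part of the original header): access_ok()
 * must be used (directly or via the non-"__" helpers) before any of
 * the unchecked "__" routines. A range whose end wraps past the top of
 * the 64-bit address space is exactly what the 65-bit check in
 * __range_ok() rejects. Names are hypothetical.
 */
static inline int example_range_check(const void __user *uptr, unsigned long len)
{
	if (!access_ok(VERIFY_READ, uptr, len))
		return -EFAULT;
	return 0;
}
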
#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */

#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user_unaligned __get_user

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
		__get_user((x), __p) :					\
		((x) = 0, -EFAULT);					\
})

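/*
 * Illustrative sketch (not part of the original header): get_user()
 * returns 0 on success and -EFAULT on a faulting or out-of-range
 * address, zeroing the destination in the failure case. Names are
 * hypothetical.
 */
static inline int example_read_u32(const unsigned int __user *uptr, unsigned int *out)
{
	unsigned int val;

	if (get_user(val, uptr))
		return -EFAULT;
	*out = val;
	return 0;
}
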
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
			CONFIG_ARM64_PAN));				\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user_unaligned __put_user

#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
		__put_user((x), __p) :					\
		-EFAULT;						\
})

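/*
 * Illustrative sketch (not part of the original header): put_user()
 * mirrors get_user() for stores and also returns 0 or -EFAULT. The
 * function name is hypothetical.
 */
static inline int example_write_u32(unsigned int __user *uptr, unsigned int val)
{
	return put_user(val, uptr);
}
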
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else /* security hole - plug it */
		memset(to, 0, n);
	return n;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}

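/*
 * Illustrative sketch (not part of the original header): unlike
 * get_user()/put_user(), the copy routines return the number of bytes
 * that could NOT be copied (0 means complete success). Names are
 * hypothetical.
 */
static inline long example_copy_round_trip(void __user *ubuf, void *kbuf,
					   unsigned long len)
{
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;		/* partial or failed copy */
	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;
	return 0;
}
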
static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))
		n = __copy_in_user(to, from, n);
	return n;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

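/*
 * Illustrative sketch (not part of the original header): bounded string
 * copy from userspace. strncpy_from_user() returns the string length on
 * success, -EFAULT on a faulting address, or 'count' when no
 * terminating NUL was found within the buffer. Names are hypothetical.
 */
static inline long example_get_name(char *dst, const char __user *src, long count)
{
	long len = strncpy_from_user(dst, src, count);

	if (len < 0)
		return len;		/* -EFAULT */
	if (len == count)
		return -ENAMETOOLONG;	/* possibly truncated */
	return len;
}
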
#endif /* __ASM_UACCESS_H */