/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/uaccess.h
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel cannot inadvertently
 * perform such accesses (e.g. via list poison values) which could then
 * be exploited for privilege escalation.
 */
static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
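
/*
 * Illustrative sketch (not part of the original header; the function name
 * is made up): every open-coded userspace access is expected to be
 * bracketed by uaccess_save_and_enable()/uaccess_restore(), exactly as the
 * get_user()/put_user() and raw_copy_*_user() helpers below do.  With
 * CONFIG_CPU_SW_DOMAIN_PAN, accesses outside such a window fault.
 */
#if 0
static inline unsigned long example_copy(void *to, const void __user *from,
					 unsigned long n)
{
	unsigned int ua_flags = uaccess_save_and_enable();

	/* userspace is only accessible between the two calls */
	n = arm_copy_from_user(to, from, n);

	uaccess_restore(ua_flags);
	return n;
}
#endif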

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

#ifdef CONFIG_MMU

/*
 * We use 33-bit arithmetic here.  Success returns zero, failure returns
 * TASK_SIZE, so access_ok() only needs to test the result for zero.
 */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	__asm__(".syntax unified\n" \
	"adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (TASK_SIZE) \
		: "cc"); \
	flag; })
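
/*
 * Worked example (illustrative only, assuming the common 3G/1G split where
 * TASK_SIZE is 0xbf000000): for addr = 0xbeffe000 and size = 0x1000 the
 * 33-bit sum 0xbefff000 does not exceed TASK_SIZE, the sbcscc borrows,
 * carry clears and movcc sets flag to 0 (success).  For size = 0x3000 the
 * sum 0xbf001000 is above TASK_SIZE, no borrow occurs, the carry stays set
 * and flag is left at TASK_SIZE (failure).  If addr + size overflows 32
 * bits (e.g. addr = 0xfffff000, size = 0x2000), the adds sets carry and
 * both conditional instructions are skipped, which is also a failure.
 */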

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
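
/*
 * Illustrative note (not part of the original header): on 32-bit ARM this
 * picks unsigned long for anything up to 4 bytes and unsigned long long
 * only for 64-bit values, e.g.
 *
 *	__inttype(char) c;	// unsigned long
 *	__inttype(int)  i;	// unsigned long
 *	__inttype(u64)  q;	// unsigned long long
 *
 * get_user() below uses it so that __r2 is wide enough to hold the fetched
 * value without truncation.
 */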

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above TASK_SIZE.
 */
#define uaccess_mask_range_ptr(ptr, size)	\
	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
						    size_t size)
{
	void __user *safe_ptr = (void __user *)ptr;
	unsigned long tmp;

	asm volatile(
	"	.syntax unified\n"
	"	sub	%1, %3, #1\n"
	"	subs	%1, %1, %0\n"
	"	addhs	%1, %1, #1\n"
	"	subshs	%1, %1, %2\n"
	"	movlo	%0, #0\n"
	: "+r" (safe_ptr), "=&r" (tmp)
	: "r" (size), "r" (TASK_SIZE)
	: "cc");

	csdb();
	return safe_ptr;
}
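
/*
 * Illustrative sketch (not part of the original header; the function name
 * is hypothetical): a caller that has already passed access_ok() can mask
 * the pointer so that an out-of-range address can never be dereferenced,
 * even speculatively.  raw_copy_from_user() is declared further down in
 * this header.
 */
#if 0
static int example_masked_read(const char __user *ubuf, size_t len, char *dst)
{
	const char __user *safe = uaccess_mask_range_ptr(ubuf, len);

	/* safe is NULL if ubuf + len is above TASK_SIZE */
	return raw_copy_from_user(dst, safe, len) ? -EFAULT : 0;
}
#endif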

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s)			\
	__asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")		\
		__asmeq("%3", "r1")				\
		"bl	__get_user_" #__s			\
		: "=&r" (__e), "=r" (__r2)			\
		: "0" (__p), "r" (__l)				\
		: __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s)		\
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * Storing the result into the least significant word of a 64-bit target
 * variable differs only in the big-endian case, where the LSW of the
 * 64-bit __r2 pair is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s)		\
	__asm__ __volatile__ (					\
		__asmeq("%0", "r0") __asmeq("%1", "r2")		\
		__asmeq("%3", "r1")				\
		"bl	__get_user_64t_" #__s			\
		: "=&r" (__e), "=r" (__r2)			\
		: "0" (__p), "r" (__l)				\
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif

#define __get_user_check(x, p)						\
	({								\
		unsigned long __limit = TASK_SIZE - 1;			\
		register typeof(*(p)) __user *__p asm("r0") = (p);	\
		register __inttype(x) __r2 asm("r2");			\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		unsigned int __ua_flags = uaccess_save_and_enable();	\
		switch (sizeof(*(__p))) {				\
		case 1:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 1);	\
			break;						\
		case 2:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 2);	\
			break;						\
		case 4:							\
			if (sizeof((x)) >= 8)				\
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 4);	\
			break;						\
		case 8:							\
			if (sizeof((x)) < 8)				\
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else						\
				__get_user_x(__r2, __p, __e, __l, 8);	\
			break;						\
		default: __e = __get_user_bad(); break;			\
		}							\
		uaccess_restore(__ua_flags);				\
		x = (typeof(*(p))) __r2;				\
		__e;							\
	})

#define get_user(x, p)							\
	({								\
		might_fault();						\
		__get_user_check(x, p);					\
	})
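
/*
 * Illustrative sketch (not part of the original header; the function name
 * is hypothetical): get_user() checks the pointer against TASK_SIZE,
 * performs the access with the uaccess window open, and returns 0 on
 * success or -EFAULT on failure (zeroing the destination on a fault).
 * Reading a 32-bit user value into a 64-bit variable goes through the
 * __get_user_x_64t() path above.
 */
#if 0
static long example_get(unsigned int __user *uptr)
{
	unsigned int val;
	unsigned long long wide;

	if (get_user(val, uptr))	/* 4-byte access, result in r2 */
		return -EFAULT;

	if (get_user(wide, uptr))	/* same access, widened to 64 bits */
		return -EFAULT;

	return (long)(val + wide);
}
#endif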

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s)			\
	({								\
		unsigned long __limit = TASK_SIZE - 1;			\
		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
		register const void __user *__p asm("r0") = __ptr;	\
		register unsigned long __l asm("r1") = __limit;		\
		register int __e asm("r0");				\
		__asm__ __volatile__ (					\
			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
			__asmeq("%3", "r1")				\
			"bl	__put_user_" #__s			\
			: "=&r" (__e)					\
			: "0" (__p), "r" (__r2), "r" (__l)		\
			: "ip", "lr", "cc");				\
		__err = __e;						\
	})

#else /* CONFIG_MMU */

#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(addr, size)	(__range_ok(addr, size) == 0)

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there.  Force these to use the standard get_user()
 * version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to -EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e. they don't return a value as such).
 */
#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err, TUSER());			\
	__gu_err;							\
})
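
/*
 * Illustrative sketch (not part of the original header; the function name
 * is hypothetical): __get_user() skips the range check, so the caller is
 * responsible for a prior access_ok() covering every access.
 */
#if 0
static int example_get_pair(unsigned int __user *uarr, unsigned int *a,
			    unsigned int *b)
{
	if (!access_ok(uarr, 2 * sizeof(*uarr)))
		return -EFAULT;

	if (__get_user(*a, uarr) || __get_user(*b, uarr + 1))
		return -EFAULT;

	return 0;
}
#endif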

#define __get_user_err(x, ptr, err, __t)				\
do {									\
	unsigned long __gu_addr = (unsigned long)(ptr);			\
	unsigned long __gu_val;						\
	unsigned int __ua_flags;					\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	__ua_flags = uaccess_save_and_enable();				\
	switch (sizeof(*(ptr))) {					\
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
	default: (__gu_val) = __get_user_bad();				\
	}								\
	uaccess_restore(__ua_flags);					\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
} while (0)
#endif

#define __get_user_asm(x, addr, err, instr)			\
	__asm__ __volatile__(					\
	"1:	" instr " %1, [%2], #0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	mov	%1, #0\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err), "=&r" (x)					\
	: "r" (addr), "i" (-EFAULT)				\
	: "cc")

#define __get_user_asm_byte(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldrb" __t)

#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldrh" __t)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err, __t)		\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
	(x) = __b1 | (__b2 << 8);				\
})
#else
#define __get_user_asm_half(x, __gu_addr, err, __t)		\
({								\
	unsigned long __b1, __b2;				\
	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
	(x) = (__b1 << 8) | __b2;				\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __get_user_asm_word(x, addr, err, __t)			\
	__get_user_asm(x, addr, err, "ldr" __t)

#define __put_user_switch(x, ptr, __err, __fn)				\
	do {								\
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
		__typeof__(*(ptr)) __pu_val = (x);			\
		unsigned int __ua_flags;				\
		might_fault();						\
		__ua_flags = uaccess_save_and_enable();			\
		switch (sizeof(*(ptr))) {				\
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
		default: __err = __put_user_bad(); break;		\
		}							\
		uaccess_restore(__ua_flags);				\
	} while (0)

#define put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_check);	\
	__pu_err;							\
})
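
/*
 * Illustrative sketch (not part of the original header; the function name
 * is hypothetical): put_user() picks the right __put_user_N() helper from
 * the pointer type and returns 0 or -EFAULT.
 */
#if 0
static int example_put(unsigned int __user *uptr,
		       unsigned long long __user *uq)
{
	if (put_user(42U, uptr))	/* 4-byte store */
		return -EFAULT;

	if (put_user(42ULL, uq))	/* 8-byte store via __put_user_8 */
		return -EFAULT;

	return 0;
}
#endif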

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
	__pu_err;							\
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
	do {								\
		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
		__put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#endif /* !CONFIG_CPU_SPECTRE */

#define __put_user_asm(x, __pu_addr, err, instr)		\
	__asm__ __volatile__(					\
	"1:	" instr " %1, [%2], #0\n"			\
	"2:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"3:	mov	%0, %3\n"				\
	"	b	2b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 3b\n"				\
	"	.popsection"					\
	: "+r" (err)						\
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "strb" __t)

#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "strh" __t)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err, __t)		\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp, __pu_addr, err, __t);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
})
#else
#define __put_user_asm_half(x, __pu_addr, err, __t)		\
({								\
	unsigned long __temp = (__force unsigned long)(x);	\
	__put_user_asm_byte(__temp >> 8, __pu_addr, err, __t);	\
	__put_user_asm_byte(__temp, __pu_addr + 1, err, __t);	\
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __put_user_asm_word(x, __pu_addr, err, __t)		\
	__put_user_asm(x, __pu_addr, err, "str" __t)

#ifndef __ARMEB__
#define	__reg_oper0	"%R2"
#define	__reg_oper1	"%Q2"
#else
#define	__reg_oper0	"%Q2"
#define	__reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err, __t)		\
	__asm__ __volatile__(					\
 ARM(	"1:	str" __t "	" __reg_oper1 ", [%1], #4\n"  ) \
 ARM(	"2:	str" __t "	" __reg_oper0 ", [%1]\n"      ) \
 THUMB(	"1:	str" __t "	" __reg_oper1 ", [%1]\n"      ) \
 THUMB(	"2:	str" __t "	" __reg_oper0 ", [%1, #4]\n"  ) \
	"3:\n"							\
	"	.pushsection .text.fixup,\"ax\"\n"		\
	"	.align	2\n"					\
	"4:	mov	%0, %3\n"				\
	"	b	3b\n"					\
	"	.popsection\n"					\
	"	.pushsection __ex_table,\"a\"\n"		\
	"	.align	3\n"					\
	"	.long	1b, 4b\n"				\
	"	.long	2b, 4b\n"				\
	"	.popsection"					\
	: "+r" (err), "+r" (__pu_addr)				\
	: "r" (x), "i" (-EFAULT)				\
	: "cc")

#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	const type *__pk_ptr = (src);					\
	unsigned long __src = (unsigned long)(__pk_ptr);		\
	type __val;							\
	int __err = 0;							\
	switch (sizeof(type)) {						\
	case 1:	__get_user_asm_byte(__val, __src, __err, ""); break;	\
	case 2: __get_user_asm_half(__val, __src, __err, ""); break;	\
	case 4: __get_user_asm_word(__val, __src, __err, ""); break;	\
	case 8: {							\
		u32 *__v32 = (u32*)&__val;				\
		__get_user_asm_word(__v32[0], __src, __err, "");	\
		if (__err)						\
			break;						\
		__get_user_asm_word(__v32[1], __src+4, __err, "");	\
		break;							\
	}								\
	default: __err = __get_user_bad(); break;			\
	}								\
	*(type *)(dst) = __val;						\
	if (__err)							\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	const type *__pk_ptr = (dst);					\
	unsigned long __dst = (unsigned long)__pk_ptr;			\
	int __err = 0;							\
	type __val = *(type *)src;					\
	switch (sizeof(type)) {						\
	case 1: __put_user_asm_byte(__val, __dst, __err, ""); break;	\
	case 2: __put_user_asm_half(__val, __dst, __err, ""); break;	\
	case 4: __put_user_asm_word(__val, __dst, __err, ""); break;	\
	case 8: __put_user_asm_dword(__val, __dst, __err, ""); break;	\
	default: __err = __put_user_bad(); break;			\
	}								\
	if (__err)							\
		goto err_label;						\
} while (0)
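
/*
 * Illustrative sketch (not part of the original header; the function name
 * is hypothetical): these macros back get_kernel_nofault() and
 * copy_from_kernel_nofault() and jump to the supplied label instead of
 * returning an error code.  The empty "" argument selects the plain
 * (non user-mode) load/store instructions.
 */
#if 0
static int example_peek_kernel(const unsigned long *kaddr, unsigned long *out)
{
	unsigned long v;

	__get_kernel_nofault(&v, kaddr, unsigned long, fault);
	*out = v;
	return 0;

fault:
	return -EFAULT;
}
#endif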

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
#endif
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
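
/*
 * Illustrative sketch (not part of the original header; the struct and
 * function names are hypothetical): INLINE_COPY_{TO,FROM}_USER make
 * <linux/uaccess.h> provide inline copy_to_user()/copy_from_user()
 * wrappers around the raw_copy_*() helpers above; a typical caller looks
 * like this:
 */
#if 0
struct example_req {
	unsigned int cmd;
	unsigned int arg;
};

static int example_fetch_req(struct example_req *req,
			     const struct example_req __user *ureq)
{
	if (copy_from_user(req, ureq, sizeof(*req)))
		return -EFAULT;	/* some bytes could not be copied */
	return 0;
}
#endif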

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __clear_user(to, n);
	return n;
}

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */