/*
 *  arch/arm/include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#include <asm-generic/uaccess-unaligned.h>
#else
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
#endif

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel cannot inadvertently
 * perform such accesses (e.g., via list poison values) which could then
 * be exploited for privilege escalation.
 */
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	unsigned int old_domain = get_domain();

	/* Set the current domain access to permit user accesses */
	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

	return old_domain;
#else
	return 0;
#endif
}

static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/* Restore the user access mask */
	set_domain(flags);
#endif
}
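
/*
 * A minimal usage sketch: any open-coded user access is expected to be
 * bracketed by this pair, which is exactly what the get_user/put_user
 * and raw_copy_* helpers further down do, e.g.:
 *
 *	unsigned int __ua_flags = uaccess_save_and_enable();
 *	...one or more userspace loads/stores...
 *	uaccess_restore(__ua_flags);
 *
 * With CONFIG_CPU_SW_DOMAIN_PAN this grants DOMAIN_USER client access
 * for the bracketed region only; otherwise both calls compile to nothing.
 */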

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS	0x00000000
#define get_ds()	(KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a, b)	((a) == (b))

/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })
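
/*
 * A worked example of the check above, assuming addr_limit is
 * 0xbf000000 (a typical ARM TASK_SIZE; the concrete values here are
 * only for illustration):
 *
 *	addr = 0xbe000000, size = 0x1000:
 *	  adds    roksum = 0xbe001000, carry clear
 *	  sbcccs  0xbe001000 - 0xbf000000 - 1 borrows, carry stays clear
 *	  movcc   flag = 0                      -> range is OK
 *
 *	addr = 0xffffff00, size = 0x200:
 *	  adds    wraps to 0x00000100 with carry set, so the conditional
 *	          instructions are skipped and flag keeps its non-zero
 *	          initial value (addr_limit)     -> range rejected
 *
 * The carry out of the addition acts as a 33rd bit, so addr + size
 * cannot wrap around the 32-bit space and defeat the limit check.
 */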

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2	"lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_32t_8	"lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s) \
	__asm__ __volatile__ ( \
		__asmeq("%0", "r0") __asmeq("%1", "r2") \
		__asmeq("%3", "r1") \
		"bl __get_user_" #__s \
		: "=&r" (__e), "=r" (__r2) \
		: "0" (__p), "r" (__l) \
		: __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s) \
	__get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s) \
	__asm__ __volatile__ ( \
		__asmeq("%0", "r0") __asmeq("%1", "r2") \
		__asmeq("%3", "r1") \
		"bl __get_user_64t_" #__s \
		: "=&r" (__e), "=r" (__r2) \
		: "0" (__p), "r" (__l) \
		: __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif


#define __get_user_check(x, p) \
	({ \
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register const typeof(*(p)) __user *__p asm("r0") = (p); \
		register typeof(x) __r2 asm("r2"); \
		register unsigned long __l asm("r1") = __limit; \
		register int __e asm("r0"); \
		unsigned int __ua_flags = uaccess_save_and_enable(); \
		switch (sizeof(*(__p))) { \
		case 1: \
			if (sizeof((x)) >= 8) \
				__get_user_x_64t(__r2, __p, __e, __l, 1); \
			else \
				__get_user_x(__r2, __p, __e, __l, 1); \
			break; \
		case 2: \
			if (sizeof((x)) >= 8) \
				__get_user_x_64t(__r2, __p, __e, __l, 2); \
			else \
				__get_user_x(__r2, __p, __e, __l, 2); \
			break; \
		case 4: \
			if (sizeof((x)) >= 8) \
				__get_user_x_64t(__r2, __p, __e, __l, 4); \
			else \
				__get_user_x(__r2, __p, __e, __l, 4); \
			break; \
		case 8: \
			if (sizeof((x)) < 8) \
				__get_user_x_32t(__r2, __p, __e, __l, 4); \
			else \
				__get_user_x(__r2, __p, __e, __l, 8); \
			break; \
		default: __e = __get_user_bad(); break; \
		} \
		uaccess_restore(__ua_flags); \
		x = (typeof(*(p))) __r2; \
		__e; \
	})

#define get_user(x, p) \
	({ \
		might_fault(); \
		__get_user_check(x, p); \
	})
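
/*
 * A minimal usage sketch; the function and variable names are
 * hypothetical:
 *
 *	static long example_ioctl(struct file *file, unsigned int cmd,
 *				  unsigned long arg)
 *	{
 *		u32 val;
 *
 *		if (get_user(val, (u32 __user *)arg))
 *			return -EFAULT;
 *		...
 *	}
 *
 * get_user() performs its own limit check via the __get_user_N library
 * routines, returns -EFAULT on failure, and zeroes the destination so
 * that stale kernel data cannot leak even if the caller ignores the
 * return value.
 */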

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s) \
	({ \
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
		register const void __user *__p asm("r0") = __ptr; \
		register unsigned long __l asm("r1") = __limit; \
		register int __e asm("r0"); \
		__asm__ __volatile__ ( \
			__asmeq("%0", "r0") __asmeq("%2", "r2") \
			__asmeq("%3", "r1") \
			"bl __put_user_" #__s \
			: "=&r" (__e) \
			: "0" (__p), "r" (__r2), "r" (__l) \
			: "ip", "lr", "cc"); \
		__err = __e; \
	})

#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a, b)	(1)
#define __addr_ok(addr)		((void)(addr), 1)
#define __range_ok(addr, size)	((void)(addr), 0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x, p)	__get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : get_fs())

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
#define __get_user(x, ptr) \
({ \
	long __gu_err = 0; \
	__get_user_err((x), (ptr), __gu_err); \
	__gu_err; \
})

#define __get_user_error(x, ptr, err) \
({ \
	__get_user_err((x), (ptr), err); \
	(void) 0; \
})
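
/*
 * A minimal sketch of the pairing described above; the names are
 * hypothetical:
 *
 *	static int example_read_pair(u32 __user *uptr, u32 *a, u32 *b)
 *	{
 *		if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
 *			return -EFAULT;
 *		if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * A single access_ok() check covers both unchecked __get_user() calls.
 */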

#define __get_user_err(x, ptr, err) \
do { \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	unsigned long __gu_val; \
	unsigned int __ua_flags; \
	__chk_user_ptr(ptr); \
	might_fault(); \
	__ua_flags = uaccess_save_and_enable(); \
	switch (sizeof(*(ptr))) { \
	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err); break; \
	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err); break; \
	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err); break; \
	default: (__gu_val) = __get_user_bad(); \
	} \
	uaccess_restore(__ua_flags); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)

#define __get_user_asm(x, addr, err, instr) \
	__asm__ __volatile__( \
	"1: " TUSER(instr) " %1, [%2], #0\n" \
	"2:\n" \
	" .pushsection .text.fixup,\"ax\"\n" \
	" .align 2\n" \
	"3: mov %0, %3\n" \
	" mov %1, #0\n" \
	" b 2b\n" \
	" .popsection\n" \
	" .pushsection __ex_table,\"a\"\n" \
	" .align 3\n" \
	" .long 1b, 3b\n" \
	" .popsection" \
	: "+r" (err), "=&r" (x) \
	: "r" (addr), "i" (-EFAULT) \
	: "cc")

#define __get_user_asm_byte(x, addr, err) \
	__get_user_asm(x, addr, err, ldrb)

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err) \
({ \
	unsigned long __b1, __b2; \
	__get_user_asm_byte(__b1, __gu_addr, err); \
	__get_user_asm_byte(__b2, __gu_addr + 1, err); \
	(x) = __b1 | (__b2 << 8); \
})
#else
#define __get_user_asm_half(x, __gu_addr, err) \
({ \
	unsigned long __b1, __b2; \
	__get_user_asm_byte(__b1, __gu_addr, err); \
	__get_user_asm_byte(__b2, __gu_addr + 1, err); \
	(x) = (__b1 << 8) | __b2; \
})
#endif

#define __get_user_asm_word(x, addr, err) \
	__get_user_asm(x, addr, err, ldr)


#define __put_user_switch(x, ptr, __err, __fn) \
	do { \
		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
		__typeof__(*(ptr)) __pu_val = (x); \
		unsigned int __ua_flags; \
		might_fault(); \
		__ua_flags = uaccess_save_and_enable(); \
		switch (sizeof(*(ptr))) { \
		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \
		case 2: __fn(__pu_val, __pu_ptr, __err, 2); break; \
		case 4: __fn(__pu_val, __pu_ptr, __err, 4); break; \
		case 8: __fn(__pu_val, __pu_ptr, __err, 8); break; \
		default: __err = __put_user_bad(); break; \
		} \
		uaccess_restore(__ua_flags); \
	} while (0)

#define put_user(x, ptr) \
({ \
	int __pu_err = 0; \
	__put_user_switch((x), (ptr), __pu_err, __put_user_check); \
	__pu_err; \
})
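
/*
 * A minimal usage sketch; the names are hypothetical:
 *
 *	static int example_report_status(u32 __user *uptr, u32 status)
 *	{
 *		if (put_user(status, uptr))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * put_user() routes through __put_user_check() above, so the address is
 * validated against addr_limit and -EFAULT is returned on failure.
 */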

#define __put_user(x, ptr) \
({ \
	long __pu_err = 0; \
	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
	__pu_err; \
})

#define __put_user_error(x, ptr, err) \
({ \
	__put_user_switch((x), (ptr), (err), __put_user_nocheck); \
	(void) 0; \
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
	do { \
		unsigned long __pu_addr = (unsigned long)__pu_ptr; \
		__put_user_nocheck_##__size(x, __pu_addr, __err); \
	} while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#define __put_user_asm(x, __pu_addr, err, instr) \
	__asm__ __volatile__( \
	"1: " TUSER(instr) " %1, [%2], #0\n" \
	"2:\n" \
	" .pushsection .text.fixup,\"ax\"\n" \
	" .align 2\n" \
	"3: mov %0, %3\n" \
	" b 2b\n" \
	" .popsection\n" \
	" .pushsection __ex_table,\"a\"\n" \
	" .align 3\n" \
	" .long 1b, 3b\n" \
	" .popsection" \
	: "+r" (err) \
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
	: "cc")

#define __put_user_asm_byte(x, __pu_addr, err) \
	__put_user_asm(x, __pu_addr, err, strb)

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err) \
({ \
	unsigned long __temp = (__force unsigned long)(x); \
	__put_user_asm_byte(__temp, __pu_addr, err); \
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
})
#else
#define __put_user_asm_half(x, __pu_addr, err) \
({ \
	unsigned long __temp = (__force unsigned long)(x); \
	__put_user_asm_byte(__temp >> 8, __pu_addr, err); \
	__put_user_asm_byte(__temp, __pu_addr + 1, err); \
})
#endif

#define __put_user_asm_word(x, __pu_addr, err) \
	__put_user_asm(x, __pu_addr, err, str)

#ifndef __ARMEB__
#define __reg_oper0 "%R2"
#define __reg_oper1 "%Q2"
#else
#define __reg_oper0 "%Q2"
#define __reg_oper1 "%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err) \
	__asm__ __volatile__( \
	ARM(	"1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
	ARM(	"2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \
	THUMB(	"1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \
	THUMB(	"2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \
	"3:\n" \
	" .pushsection .text.fixup,\"ax\"\n" \
	" .align 2\n" \
	"4: mov %0, %3\n" \
	" b 3b\n" \
	" .popsection\n" \
	" .pushsection __ex_table,\"a\"\n" \
	" .align 3\n" \
	" .long 1b, 4b\n" \
	" .long 2b, 4b\n" \
	" .popsection" \
	: "+r" (err), "+r" (__pu_addr) \
	: "r" (x), "i" (-EFAULT) \
	: "cc")


#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int __ua_flags;

	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_from_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags;
	__ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
#else
	return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_clear_user(addr, n);
	uaccess_restore(__ua_flags);
	return n;
}

#else
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
#endif
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
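
/*
 * With the two INLINE_* defines above, <linux/uaccess.h> builds
 * copy_from_user() and copy_to_user() as inline wrappers around the
 * raw_copy_* routines in this file.  A typical caller looks roughly
 * like this (the struct and variable names are hypothetical):
 *
 *	struct example_args args;
 *
 *	if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user((void __user *)arg, &args, sizeof(args)))
 *		return -EFAULT;
 *
 * Both return the number of bytes that could not be copied, so any
 * non-zero result is treated as a fault.
 */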

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}
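
/*
 * A minimal sketch (ubuf, len and size are hypothetical): clear_user()
 * is the checked variant, useful for e.g. zero-padding the unused tail
 * of a user buffer:
 *
 *	if (clear_user(ubuf + len, size - len))
 *		return -EFAULT;
 *
 * Like the copy routines, it returns the number of bytes left unzeroed.
 */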

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */