arch/mn10300/include/asm/uaccess.h
/* MN10300 userspace access functions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <asm/page.h>
#include <asm/errno.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_XDS	MAKE_MM_SEG(0xBFFFFFFF)
#define KERNEL_DS	MAKE_MM_SEG(0x9FFFFFFF)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
#define __kernel_ds_p()	(current_thread_info()->addr_limit.seg == 0x9FFFFFFF)

#define segment_eq(a, b) ((a).seg == (b).seg)

#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
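
/*
 * Usage sketch (illustrative only, not part of the original header):
 * the classic pattern for temporarily widening the address limit so a
 * routine that expects a __user pointer can be fed a kernel buffer.
 * read_kernel_buf() is a hypothetical helper; vfs_read() is the usual
 * VFS entry point.  The old limit must always be restored.
 */
#if 0
static ssize_t read_kernel_buf(struct file *file, char *kbuf, size_t len,
			       loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);	/* kernel addresses now pass access_ok() */
	ret = vfs_read(file, (char __user *)kbuf, len, pos);
	set_fs(old_fs);		/* restore the caller's limit */
	return ret;
}
#endif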

/*
 * check that a range of addresses falls within the current address limit
 */
static inline int ___range_ok(unsigned long addr, unsigned int size)
{
	int flag = 1, tmp;

	asm("	add	%3,%1	\n"	/* set C-flag if addr + size > 4Gb */
	    "	bcs	0f	\n"
	    "	cmp	%4,%1	\n"	/* jump if addr+size > limit (error) */
	    "	bhi	0f	\n"
	    "	clr	%0	\n"	/* mark okay */
	    "0:			\n"
	    : "=r"(flag), "=&r"(tmp)
	    : "1"(addr), "ir"(size),
	      "r"(current_thread_info()->addr_limit.seg), "0"(flag)
	    : "cc"
	    );

	return flag;
}

#define __range_ok(addr, size) ___range_ok((unsigned long)(addr), (u32)(size))
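
/*
 * Illustrative C equivalent of the asm in ___range_ok() above (a
 * readability sketch, not a drop-in replacement): the access fails if
 * addr + size wraps past 4GB (the carry test) or runs beyond the
 * current address limit.
 */
#if 0
static inline int ___range_ok_c(unsigned long addr, unsigned int size)
{
	unsigned long limit = current_thread_info()->addr_limit.seg;
	unsigned long end = addr + size;

	if (end < addr)		/* carry: wrapped past 4GB */
		return 1;
	if (end > limit)	/* runs past the address limit */
		return 1;
	return 0;		/* okay */
}
#endif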

#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size)     (__range_ok((addr), (size)) == 0)

static inline int verify_area(int type, const void *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}


/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if no fixup is found for the faulting instruction; otherwise
 * applies the fixup and returns nonzero. */
extern int fixup_exception(struct pt_regs *regs);
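
/*
 * Sketch (illustrative; the real implementation lives in the arch's
 * mm/extable.c) of how a fault handler consumes the table: look up the
 * faulting PC and, if an entry exists, resume at its fixup address.
 */
#if 0
int fixup_exception_sketch(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;	/* continue in the fixup stub */
		return 1;
	}
	return 0;
}
#endif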

#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
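
/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * read a value from user space, double it, and write it back.
 * get_user() and put_user() return 0 on success or -EFAULT on a bad
 * address.
 */
#if 0
static int double_user_value(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;
	return put_user(val * 2, uptr);
}
#endif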

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_nocheck(x, ptr, size)			\
({								\
	unsigned long __gu_addr;				\
	int __gu_err;						\
	__gu_addr = (unsigned long) (ptr);			\
	switch (size) {						\
	case 1: {						\
		unsigned char __gu_val;				\
		__get_user_asm("bu");				\
		(x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
		break;						\
	}							\
	case 2: {						\
		unsigned short __gu_val;			\
		__get_user_asm("hu");				\
		(x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
		break;						\
	}							\
	case 4: {						\
		unsigned int __gu_val;				\
		__get_user_asm("");				\
		(x) = *(__force __typeof__(*(ptr)) *) &__gu_val; \
		break;						\
	}							\
	default:						\
		__get_user_unknown();				\
		break;						\
	}							\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)				\
({								\
	const __typeof__(*(ptr)) *__guc_ptr = (ptr);		\
	int _e;							\
	if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
		_e = __get_user_nocheck((x), __guc_ptr, (size)); \
	else {							\
		_e = -EFAULT;					\
		(x) = (__typeof__(x))0;				\
	}							\
	_e;							\
})

/*
 * Load one value from user space.  A faulting load is redirected to the
 * out-of-line fixup at 3:, which sets __gu_err to -EFAULT and resumes
 * at 2:.
 */
#define __get_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%2,%1\n"		\
		"	mov	0,%0\n"				\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n\t"					\
		"	mov	%3,%0\n"			\
		"	jmp	2b\n"				\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign	4\n"				\
		"	.long	1b, 3b\n"			\
		"	.previous"				\
		: "=&r" (__gu_err), "=&r" (__gu_val)		\
		: "m" (__m(__gu_addr)), "i" (-EFAULT));		\
})

extern int __get_user_unknown(void);
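
/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * when making several accesses to the same user buffer, check the range
 * once with access_ok() and then use the unchecked __get_user().
 */
#if 0
static int sum_two_user_ints(const int __user *uptr, int *sum)
{
	int a, b;

	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
		return -EFAULT;
	if (__get_user(a, uptr) || __get_user(b, uptr + 1))
		return -EFAULT;
	*sum = a + b;
	return 0;
}
#endif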

/*
 * Store one value to user space.  The union lets a 64-bit value be
 * handed to __put_user_asm8() as two 32-bit words.
 */
#define __put_user_nocheck(x, ptr, size)		\
({							\
	union {						\
		__typeof__(*(ptr)) val;			\
		u32 bits[2];				\
	} __pu_val;					\
	unsigned long __pu_addr;			\
	int __pu_err;					\
	__pu_val.val = (x);				\
	__pu_addr = (unsigned long) (ptr);		\
	switch (size) {					\
	case 1:  __put_user_asm("bu"); break;		\
	case 2:  __put_user_asm("hu"); break;		\
	case 4:  __put_user_asm(""  ); break;		\
	case 8:  __put_user_asm8();    break;		\
	default: __pu_err = __put_user_unknown(); break; \
	}						\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)			\
({							\
	union {						\
		__typeof__(*(ptr)) val;			\
		u32 bits[2];				\
	} __pu_val;					\
	unsigned long __pu_addr;			\
	int __pu_err;					\
	__pu_val.val = (x);				\
	__pu_addr = (unsigned long) (ptr);		\
	if (likely(__access_ok(__pu_addr, size))) {	\
		switch (size) {				\
		case 1:  __put_user_asm("bu"); break;	\
		case 2:  __put_user_asm("hu"); break;	\
		case 4:  __put_user_asm(""  ); break;	\
		case 8:  __put_user_asm8();    break;	\
		default: __pu_err = __put_user_unknown(); break; \
		}					\
	} else {					\
		__pu_err = -EFAULT;			\
	}						\
	__pu_err;					\
})

#define __put_user_asm(INSN)					\
({								\
	asm volatile(						\
		"1:\n"						\
		"	mov"INSN"	%1,%2\n"		\
		"	mov	0,%0\n"				\
		"2:\n"						\
		"	.section	.fixup,\"ax\"\n"	\
		"3:\n"						\
		"	mov	%3,%0\n"			\
		"	jmp	2b\n"				\
		"	.previous\n"				\
		"	.section	__ex_table,\"a\"\n"	\
		"	.balign	4\n"				\
		"	.long	1b, 3b\n"			\
		"	.previous"				\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.val), "m" (__m(__pu_addr)),	\
		  "i" (-EFAULT)					\
		);						\
})

/*
 * Store a 64-bit value as two 32-bit words; a fault on either store
 * jumps to the common fixup.
 */
#define __put_user_asm8()					\
({								\
	asm volatile(						\
		"1:	mov	%1,%3		\n"		\
		"2:	mov	%2,%4		\n"		\
		"	mov	0,%0		\n"		\
		"3:				\n"		\
		"	.section .fixup,\"ax\"	\n"		\
		"4:				\n"		\
		"	mov	%5,%0		\n"		\
		"	jmp	3b		\n"		\
		"	.previous		\n"		\
		"	.section __ex_table,\"a\"\n"		\
		"	.balign	4		\n"		\
		"	.long	1b, 4b		\n"		\
		"	.long	2b, 4b		\n"		\
		"	.previous		\n"		\
		: "=&r" (__pu_err)				\
		: "r" (__pu_val.bits[0]), "r" (__pu_val.bits[1]), \
		  "m" (__m(__pu_addr)), "m" (__m(__pu_addr+4)),	\
		  "i" (-EFAULT)					\
		);						\
})

extern int __put_user_unknown(void);


/*
 * Copy To/From Userspace
 */
/* Generic arbitrary sized copy.  Copies byte by byte; on a fault the
 * fixup simply stops, leaving size holding the number of bytes that
 * were not copied.
 */
#define __copy_user(to, from, size)				\
do {								\
	if (size) {						\
		void *__to = to;				\
		const void *__from = from;			\
		int w;						\
		asm volatile(					\
			"0:	movbu	(%0),%3;\n"		\
			"1:	movbu	%3,(%1);\n"		\
			"	inc	%0;\n"			\
			"	inc	%1;\n"			\
			"	add	-1,%2;\n"		\
			"	bne	0b;\n"			\
			"2:\n"					\
			"	.section .fixup,\"ax\"\n"	\
			"3:	jmp	2b\n"			\
			"	.previous\n"			\
			"	.section __ex_table,\"a\"\n"	\
			"	.balign	4\n"			\
			"	.long	0b,3b\n"		\
			"	.long	1b,3b\n"		\
			"	.previous\n"			\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w) \
			: "0"(__from), "1"(__to), "2"(size)	\
			: "cc", "memory");			\
	}							\
} while (0)

/* As __copy_user(), but on a fault the fixup zero-fills the remainder of
 * the destination buffer (as copy_from_user() semantics require) and
 * still leaves size holding the number of bytes that were not copied.
 */
#define __copy_user_zeroing(to, from, size)			\
do {								\
	if (size) {						\
		void *__to = to;				\
		const void *__from = from;			\
		int w;						\
		asm volatile(					\
			"0:	movbu	(%0),%3;\n"		\
			"1:	movbu	%3,(%1);\n"		\
			"	inc	%0;\n"			\
			"	inc	%1;\n"			\
			"	add	-1,%2;\n"		\
			"	bne	0b;\n"			\
			"2:\n"					\
			"	.section .fixup,\"ax\"\n"	\
			"3:\n"					\
			"	mov	%2,%0\n"	/* save the uncopied count */ \
			"	clr	%3\n"			\
			"4:	movbu	%3,(%1);\n"	/* zero-fill the rest */ \
			"	inc	%1;\n"			\
			"	add	-1,%2;\n"		\
			"	bne	4b;\n"			\
			"	mov	%0,%2\n"	/* restore the uncopied count */ \
			"	jmp	2b\n"			\
			"	.previous\n"			\
			"	.section __ex_table,\"a\"\n"	\
			"	.balign	4\n"			\
			"	.long	0b,3b\n"		\
			"	.long	1b,3b\n"		\
			"	.previous\n"			\
			: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w) \
			: "0"(__from), "1"(__to), "2"(size)	\
			: "cc", "memory");			\
	}							\
} while (0)

/* We make the __ versions of copy_from/to_user inline, because they're
 * often used in fast paths and incur only a small space overhead.
 */
static inline
unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
					       unsigned long n)
{
	__copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
					     unsigned long n)
{
	__copy_user(to, from, n);
	return n;
}


#if 0
#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size)	\
do {						\
	asm volatile(				\
		"	mov	%0,a0;\n"	\
		"0:	movbu	(%1),d3;\n"	\
		"1:	movbu	d3,(%2);\n"	\
		"	add	-1,a0;\n"	\
		"	bne	0b;\n"		\
		"2:;"				\
		".section .fixup,\"ax\"\n"	\
		"3:	jmp	2b\n"		\
		".previous\n"			\
		".section __ex_table,\"a\"\n"	\
		"	.balign	4\n"		\
		"	.long	0b,3b\n"	\
		"	.long	1b,3b\n"	\
		".previous"			\
		:				\
		: "d"(size), "d"(to), "d"(from)	\
		: "d3", "a0");			\
} while (0)

/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size)	\
do {							\
	asm volatile(					\
		"	mov	%0,a0;\n"		\
		"0:	movbu	(%1),d3;\n"		\
		"1:	movbu	d3,(%2);\n"		\
		"	add	-1,a0;\n"		\
		"	bne	0b;\n"			\
		"2:;"					\
		".section .fixup,\"ax\"\n"		\
		"3:	jmp	2b\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.balign	4\n"			\
		"	.long	0b,3b\n"		\
		"	.long	1b,3b\n"		\
		".previous"				\
		:					\
		: "d"(size), "d"(to), "d"(from)		\
		: "d3", "a0");				\
} while (0)

static inline
unsigned long __constant_copy_to_user(void *to, const void *from,
				      unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user(void *to, const void *from,
					unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		__constant_copy_user_zeroing(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
					      unsigned long n)
{
	__constant_copy_user(to, from, n);
	return n;
}

static inline
unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
						unsigned long n)
{
	__constant_copy_user_zeroing(to, from, n);
	return n;
}
#endif

extern unsigned long __generic_copy_to_user(void __user *, const void *,
					    unsigned long);
extern unsigned long __generic_copy_from_user(void *, const void __user *,
					      unsigned long);

#define __copy_to_user_inatomic(to, from, n) \
	__generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
	__generic_copy_from_user_nocheck((to), (from), (n))

#define __copy_to_user(to, from, n)			\
({							\
	might_fault();					\
	__copy_to_user_inatomic((to), (from), (n));	\
})

#define __copy_from_user(to, from, n)			\
({							\
	might_fault();					\
	__copy_from_user_inatomic((to), (from), (n));	\
})


#define copy_to_user(to, from, n)   __generic_copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
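
/*
 * Usage sketch (hypothetical helper and struct, not part of the
 * original header): copy a structure in from user space.
 * copy_from_user() returns the number of bytes it could NOT copy, so
 * any nonzero result means failure.
 */
#if 0
struct my_args { int x, y; };	/* hypothetical */

static int fetch_args(struct my_args *kargs,
		      const struct my_args __user *uargs)
{
	if (copy_from_user(kargs, uargs, sizeof(*kargs)))
		return -EFAULT;
	return 0;
}
#endif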

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *str, long n);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
extern unsigned long clear_user(void __user *mem, unsigned long len);
extern unsigned long __clear_user(void __user *mem, unsigned long len);
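
/*
 * Usage sketch (hypothetical helper, not part of the original header):
 * fetch a NUL-terminated name from user space.  strncpy_from_user()
 * returns the string length on success and a negative error on a bad
 * address; it does not guarantee NUL termination when the source is
 * longer than the buffer, so terminate explicitly.
 */
#if 0
static int fetch_name(char *buf, long buflen, const char __user *uname)
{
	long len = strncpy_from_user(buf, uname, buflen);

	if (len < 0)
		return len;		/* -EFAULT */
	buf[buflen - 1] = '\0';		/* ensure termination on truncation */
	return 0;
}
#endif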

#endif /* _ASM_UACCESS_H */