/* $Id: uaccess.h,v 1.11 2003/10/13 07:21:20 lethal Exp $
 *
 * User space memory access functions
 *
 * Copyright (C) 1999, 2002  Niibe Yutaka
 * Copyright (C) 2003  Paul Mundt
 *
 * Based on:
 *	MIPS implementation version 1.15 by
 *		Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *	and i386 version.
 */
#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

/*
 * NOTE: Macros/functions in this file depend on the thread_info.h
 * implementation. It assumes:
 *	TI_FLAGS == 8
 *	TIF_USERSPACE == 31
 *	USER_ADDR_LIMIT == 0x80000000
 */

#define VERIFY_READ 0
#define VERIFY_WRITE 1

typedef struct {
	unsigned int is_user_space;
} mm_segment_t;

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
#define segment_eq(a,b)	((a).is_user_space == (b).is_user_space)

#define USER_ADDR_LIMIT	0x80000000

#define KERNEL_DS	MAKE_MM_SEG(0)
#define USER_DS		MAKE_MM_SEG(1)

#define get_ds()	(KERNEL_DS)

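/*
 * A minimal usage sketch (the classic kernel idiom, not code from this
 * file): temporarily widen the address limit so that the user-access
 * routines accept kernel pointers, then restore it.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... use get_user()/put_user() on kernel addresses ...
 *	set_fs(old_fs);
 */
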
#if !defined(CONFIG_MMU)
static inline mm_segment_t get_fs(void)
{
	return USER_DS;
}

static inline void set_fs(mm_segment_t s)
{
}

/*
 * __access_ok: Check if address with size is OK or not.
 *
 * If we don't have an MMU (or if it's disabled), the only thing we really
 * have to look out for is whether the address resides somewhere outside
 * of our available RAM.
 *
 * TODO: This check could probably also stand to be restricted somewhat more..
 * though it still does the Right Thing(tm) for the time being.
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	extern unsigned long memory_start, memory_end;

	return ((addr >= memory_start) && ((addr + size) < memory_end));
}
#else /* CONFIG_MMU */
static inline mm_segment_t get_fs(void)
{
	return MAKE_MM_SEG(test_thread_flag(TIF_USERSPACE));
}

/*
 * Store s.is_user_space into the TIF_USERSPACE bit (bit 31) of
 * ti->flags: shift the old bit out with shal, set T from the new
 * value with cmp/pl, and rotate T back in as the new top bit.
 */
static inline void set_fs(mm_segment_t s)
{
	unsigned long ti, flag;
	__asm__ __volatile__(
		"stc	r7_bank, %0\n\t"
		"mov.l	@(8,%0), %1\n\t"
		"shal	%1\n\t"
		"cmp/pl	%2\n\t"
		"rotcr	%1\n\t"
		"mov.l	%1, @(8,%0)"
		: "=&r" (ti), "=&r" (flag)
		: "r" (s.is_user_space)
		: "t");
	/****
	if (s.is_user_space)
		set_thread_flag(TIF_USERSPACE);
	else
		clear_thread_flag(TIF_USERSPACE);
	****/
}

/*
 * __access_ok: Check if address with size is OK or not.
 *
 * We do three checks:
 * (1) is it user space?
 * (2) addr + size --> carry?
 * (3) addr + size >= 0x80000000  (USER_ADDR_LIMIT)
 *
 * (1) (2) (3) | RESULT
 *  0   0   0  |  ok
 *  0   0   1  |  ok
 *  0   1   0  |  bad
 *  0   1   1  |  bad
 *  1   0   0  |  ok
 *  1   0   1  |  bad
 *  1   1   0  |  bad
 *  1   1   1  |  bad
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	unsigned long flag, tmp;

	__asm__("stc	r7_bank, %0\n\t"
		"mov.l	@(8,%0), %0\n\t"
		"clrt\n\t"
		"addc	%2, %1\n\t"
		"and	%1, %0\n\t"
		"rotcl	%0\n\t"
		"rotcl	%0\n\t"
		"and	#3, %0"
		: "=&z" (flag), "=r" (tmp)
		: "r" (addr), "1" (size)
		: "t");

	return flag == 0;
}
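
/*
 * A rough C equivalent of the check above (a sketch for reference, not
 * the generated code): bit 31 of ti->flags is TIF_USERSPACE, so ANDing
 * the flags with addr + size leaves bit 31 set exactly when both (1)
 * and (3) hold.
 *
 *	unsigned long sum = addr + size;
 *	int carry = sum < addr;
 *	int user = test_thread_flag(TIF_USERSPACE);
 *
 *	return !carry && !(user && sum >= USER_ADDR_LIMIT);
 */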
#endif /* CONFIG_MMU */

static inline int access_ok(int type, const void __user *p, unsigned long size)
{
	unsigned long addr = (unsigned long)p;
	return __access_ok(addr, size);
}

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr)	__get_user_check((x),(ptr),sizeof(*(ptr)))
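
/*
 * Example (sketch, with uaddr a hypothetical user pointer): get_user()
 * returns 0 on success and -EFAULT on failure, and picks the access
 * width from the pointer type.
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)uaddr))
 *		return -EFAULT;
 */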

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
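
/*
 * Sketch of the unchecked variants: validate the whole range once,
 * then use __get_user() for each access (uptr is hypothetical).
 *
 *	u32 a, b;
 *
 *	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	__get_user(a, &uptr[0]);
 *	__get_user(b, &uptr[1]);
 */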

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: \
		__get_user_asm(x, ptr, retval, "b"); \
		break; \
	case 2: \
		__get_user_asm(x, ptr, retval, "w"); \
		break; \
	case 4: \
		__get_user_asm(x, ptr, retval, "l"); \
		break; \
	default: \
		__get_user_unknown(); \
		break; \
	} \
} while (0)

#define __get_user_nocheck(x,ptr,size) \
({ \
	long __gu_err, __gu_val; \
	__get_user_size(__gu_val, (ptr), (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

#define __get_user_check(x,ptr,size) \
({ \
	long __gu_err, __gu_val; \
	switch (size) { \
	case 1: \
		__get_user_1(__gu_val, (ptr), __gu_err); \
		break; \
	case 2: \
		__get_user_2(__gu_val, (ptr), __gu_err); \
		break; \
	case 4: \
		__get_user_4(__gu_val, (ptr), __gu_err); \
		break; \
	default: \
		__get_user_unknown(); \
		break; \
	} \
	\
	(x) = (__typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})

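/*
 * The __get_user_N() helpers below fold the address check into the
 * load: ANDing the address with ti->flags leaves bit 31 set only when
 * TIF_USERSPACE is set and the address is above USER_ADDR_LIMIT, in
 * which case they bail out with -14 (-EFAULT) instead of loading.
 */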
#define __get_user_1(x,addr,err) ({ \
__asm__("stc	r7_bank, %1\n\t" \
	"mov.l	@(8,%1), %1\n\t" \
	"and	%2, %1\n\t" \
	"cmp/pz	%1\n\t" \
	"bt/s	1f\n\t" \
	" mov	#0, %0\n\t" \
	"0:\n" \
	"mov	#-14, %0\n\t" \
	"bra	2f\n\t" \
	" mov	#0, %1\n" \
	"1:\n\t" \
	"mov.b	@%2, %1\n\t" \
	"extu.b	%1, %1\n" \
	"2:\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 0b\n\t" \
	".previous" \
	: "=&r" (err), "=&r" (x) \
	: "r" (addr) \
	: "t"); \
})

#define __get_user_2(x,addr,err) ({ \
__asm__("stc	r7_bank, %1\n\t" \
	"mov.l	@(8,%1), %1\n\t" \
	"and	%2, %1\n\t" \
	"cmp/pz	%1\n\t" \
	"bt/s	1f\n\t" \
	" mov	#0, %0\n\t" \
	"0:\n" \
	"mov	#-14, %0\n\t" \
	"bra	2f\n\t" \
	" mov	#0, %1\n" \
	"1:\n\t" \
	"mov.w	@%2, %1\n\t" \
	"extu.w	%1, %1\n" \
	"2:\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 0b\n\t" \
	".previous" \
	: "=&r" (err), "=&r" (x) \
	: "r" (addr) \
	: "t"); \
})

#define __get_user_4(x,addr,err) ({ \
__asm__("stc	r7_bank, %1\n\t" \
	"mov.l	@(8,%1), %1\n\t" \
	"and	%2, %1\n\t" \
	"cmp/pz	%1\n\t" \
	"bt/s	1f\n\t" \
	" mov	#0, %0\n\t" \
	"0:\n" \
	"mov	#-14, %0\n\t" \
	"bra	2f\n\t" \
	" mov	#0, %1\n" \
	"1:\n\t" \
	"mov.l	@%2, %1\n\t" \
	"2:\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 0b\n\t" \
	".previous" \
	: "=&r" (err), "=&r" (x) \
	: "r" (addr) \
	: "t"); \
})

#define __get_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%2, %1\n\t" \
	"mov	#0, %0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov	#0, %1\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (err), "=&r" (x) \
	:"m" (__m(addr)), "i" (-EFAULT)); })

extern void __get_user_unknown(void);

#define __put_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: \
		__put_user_asm(x, ptr, retval, "b"); \
		break; \
	case 2: \
		__put_user_asm(x, ptr, retval, "w"); \
		break; \
	case 4: \
		__put_user_asm(x, ptr, retval, "l"); \
		break; \
	case 8: \
		__put_user_u64(x, ptr, retval); \
		break; \
	default: \
		__put_user_unknown(); \
	} \
} while (0)

#define __put_user_nocheck(x,ptr,size) \
({ \
	long __pu_err; \
	__put_user_size((x),(ptr),(size),__pu_err); \
	__pu_err; \
})

#define __put_user_check(x,ptr,size) \
({ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	\
	if (__access_ok((unsigned long)__pu_addr,size)) \
		__put_user_size((x),__pu_addr,(size),__pu_err); \
	__pu_err; \
})

#define __put_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%1, %2\n\t" \
	"mov	#0, %0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	"mov	%3, %0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (err) \
	:"r" (x), "m" (__m(addr)), "i" (-EFAULT) \
	:"memory"); })

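/*
 * Note on the 64-bit stores below: as I read the GCC SH operand
 * modifiers, %R gives the least significant word of a 64-bit register
 * pair and %S the most significant, while %T refers to the word after
 * a memory operand; the two variants order the stores per endianness.
 */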
#if defined(__LITTLE_ENDIAN__)
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%R1,%2\n\t" \
	"mov.l	%S1,%T2\n\t" \
	"mov	#0,%0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l	4f,%0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3,%0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
	: "memory"); })
#else
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%S1,%2\n\t" \
	"mov.l	%R1,%T2\n\t" \
	"mov	#0,%0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l	4f,%0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3,%0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
	: "memory"); })
#endif

extern void __put_user_unknown(void);

/*
 * Generic arbitrary sized copy.  Returns the number of bytes NOT
 * copied.
 */
extern __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);

#define copy_to_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })
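
/*
 * Example (sketch): the copy routines return the number of bytes that
 * could NOT be copied, so a nonzero result means a (partial) failure.
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */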

#define __copy_to_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#define copy_from_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	void *__copy_from = (void *) (from); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })

#define __copy_from_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)

/*
 * Clear the area and return the number of bytes remaining
 * (i.e. not cleared, on failure; usually 0).
 */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n) ({ \
	void * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; })

static __inline__ int
__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
{
	__kernel_size_t res;
	unsigned long __dummy, _d, _s;

	__asm__ __volatile__(
		"9:\n"
		"mov.b	@%2+, %1\n\t"
		"cmp/eq	#0, %1\n\t"
		"bt/s	2f\n"
		"1:\n"
		"mov.b	%1, @%3\n\t"
		"dt	%7\n\t"
		"bf/s	9b\n\t"
		" add	#1, %3\n\t"
		"2:\n\t"
		"sub	%7, %0\n"
		"3:\n"
		".section .fixup,\"ax\"\n"
		"4:\n\t"
		"mov.l	5f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	%8, %0\n\t"
		".balign 4\n"
		"5:	.long 3b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 9b,4b\n"
		".previous"
		: "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d)
		: "0" (__count), "2" (__src), "3" (__dest), "r" (__count),
		  "i" (-EFAULT)
		: "memory", "t");

	return res;
}

#define strncpy_from_user(dest,src,count) ({ \
	unsigned long __sfu_src = (unsigned long) (src); \
	int __sfu_count = (int) (count); \
	long __sfu_res = -EFAULT; \
	if (__access_ok(__sfu_src, __sfu_count)) { \
		__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
	} __sfu_res; })
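
/*
 * Example (sketch): on success strncpy_from_user() returns the length
 * of the string copied (the terminating NUL is copied but not
 * counted); on a fault it returns -EFAULT.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 */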

/*
 * Return the size of a string (including the ending 0!)
 */
static __inline__ long __strnlen_user(const char __user *__s, long __n)
{
	unsigned long res;
	unsigned long __dummy;

	__asm__ __volatile__(
		"9:\n"
		"cmp/eq	%4, %0\n\t"
		"bt	2f\n"
		"1:\t"
		"mov.b	@(%0,%3), %1\n\t"
		"tst	%1, %1\n\t"
		"bf/s	9b\n\t"
		" add	#1, %0\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l	4f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	%5, %0\n"
		".balign 4\n"
		"4:	.long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 1b,3b\n"
		".previous"
		: "=z" (res), "=&r" (__dummy)
		: "0" (0), "r" (__s), "r" (__n), "i" (-EFAULT)
		: "t");
	return res;
}

static __inline__ long strnlen_user(const char __user *s, long n)
{
	if (!access_ok(VERIFY_READ, s, n))
		return 0;
	else
		return __strnlen_user(s, n);
}

static __inline__ long strlen_user(const char __user *s)
{
	if (!access_ok(VERIFY_READ, s, 0))
		return 0;
	else
		return __strnlen_user(s, ~0UL >> 1);
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines above use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};
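
/*
 * Sketch of how the fault handler consumes this table (the generic
 * kernel pattern; see fixup_exception() for the real implementation):
 *
 *	const struct exception_table_entry *e;
 *
 *	e = search_exception_tables(regs->pc);
 *	if (e) {
 *		regs->pc = e->fixup;
 *		return 1;
 *	}
 *	return 0;
 */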

extern int fixup_exception(struct pt_regs *regs);

#endif /* __ASM_SH_UACCESS_H */