#ifndef _ASM_IA64_UACCESS_H
#define _ASM_IA64_UACCESS_H

/*
 * This file defines various macros to transfer memory areas across
 * the user/kernel boundary.  This needs to be done carefully because
 * this code is executed in kernel mode and uses user-specified
 * addresses.  Thus, we need to be careful not to let the user trick us
 * into accessing kernel memory that would normally be inaccessible.
 * This code is also fairly performance sensitive, so we want to spend
 * as little time doing safety checks as possible.
 *
 * To make matters a bit more interesting, these macros are sometimes
 * also called from within the kernel itself, in which case the address
 * validity check must be skipped.  The get_fs() macro tells us what
 * to do: if get_fs()==USER_DS, checking is performed; if
 * get_fs()==KERNEL_DS, checking is bypassed.
 *
 * Note that even if the memory area specified by the user is in a
 * valid address range, it is still possible that we'll get a page
 * fault while accessing it.  This is handled by filling out an
 * exception handler fixup entry for each instruction that has the
 * potential to fault.  When such a fault occurs, the page fault
 * handler checks to see whether the faulting instruction has a fixup
 * associated with it and, if so, sets r8 to -EFAULT, clears r9 to 0,
 * and then resumes execution at the continuation point.
 *
 * Based on <asm-alpha/uaccess.h>.
 *
 * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
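
/*
 * For instance, a caller might fetch a single value and propagate a
 * fault (reported via the fixup mechanism above) as -EFAULT.  A minimal
 * sketch -- read_flag() and its arguments are hypothetical:
 *
 *	int read_flag (const int __user *uaddr, int *kval)
 *	{
 *		if (get_user(*kval, uaddr))
 *			return -EFAULT;	// fixup fired; r8 was set to -EFAULT
 *		return 0;
 *	}
 */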

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/page-flags.h>
#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/pgtable.h>
#include <asm/io.h>

/*
 * For historical reasons, the following macros are grossly misnamed:
 */
#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

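/*
 * A minimal sketch of the usual pattern (old_fs/dst/src/len are
 * hypothetical): kernel code that wants to feed kernel pointers through
 * the user-access routines widens the limit around the access and must
 * always restore it afterwards:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);	// bypass the address-range check
 *	err = copy_from_user(dst, (const void __user *) src, len);
 *	set_fs(old_fs);
 */
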
/*
 * When accessing user memory, we need to make sure the entire area really is in
 * user-level space.  In order to do this efficiently, we make sure that the page at
 * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
 * point inside the virtually mapped linear page table.
 */
#define __access_ok(addr, size, segment)						\
({											\
	__chk_user_ptr(addr);								\
	(likely((unsigned long) (addr) <= (segment).seg)				\
	 && ((segment).seg == KERNEL_DS.seg						\
	     || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT)));	\
})
#define access_ok(type, addr, size)	__access_ok((addr), (size), get_fs())

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()").
 */
#define __put_user(x, ptr)	__put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

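/*
 * A sketch of the intended pattern (uarr/kbuf/n are hypothetical):
 * validate the whole range once, then use the unchecked accessors for
 * the individual loads:
 *
 *	if (!access_ok(VERIFY_READ, uarr, n * sizeof(u32)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__get_user(kbuf[i], uarr + i))
 *			return -EFAULT;
 */
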
extern long __put_user_unaligned_unknown (void);

#define __put_user_unaligned(x, ptr)							\
({											\
	long __ret;									\
	switch (sizeof(*(ptr))) {							\
		case 1: __ret = __put_user((x), (ptr)); break;				\
		case 2: __ret = (__put_user((x), (u8 __user *)(ptr)))			\
			| (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break;	\
		case 4: __ret = (__put_user((x), (u16 __user *)(ptr)))			\
			| (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break;	\
		case 8: __ret = (__put_user((x), (u32 __user *)(ptr)))			\
			| (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break;	\
		default: __ret = __put_user_unaligned_unknown();			\
	}										\
	__ret;										\
})
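
/*
 * For example, an 8-byte __put_user_unaligned() is issued as two 4-byte
 * stores: for x == 0x1122334455667788, the low word 0x55667788 is stored
 * at ptr and the high word 0x11223344 (i.e., x >> 32) at ptr + 4.
 */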

extern long __get_user_unaligned_unknown (void);

/*
 * Read an unaligned datum one naturally aligned half at a time and
 * reassemble the value from the two halves.
 */
#define __get_user_unaligned(x, ptr)							\
({											\
	unsigned long __gu_lo = 0, __gu_hi = 0;						\
	long __ret;									\
	switch (sizeof(*(ptr))) {							\
		case 1: __ret = __get_user((x), (ptr)); break;				\
		case 2: __ret = (__get_user(__gu_lo, (u8 __user *)(ptr)))		\
			| (__get_user(__gu_hi, ((u8 __user *)(ptr) + 1)));		\
			(x) = __gu_lo | (__gu_hi << 8); break;				\
		case 4: __ret = (__get_user(__gu_lo, (u16 __user *)(ptr)))		\
			| (__get_user(__gu_hi, ((u16 __user *)(ptr) + 1)));		\
			(x) = __gu_lo | (__gu_hi << 16); break;				\
		case 8: __ret = (__get_user(__gu_lo, (u32 __user *)(ptr)))		\
			| (__get_user(__gu_hi, ((u32 __user *)(ptr) + 1)));		\
			(x) = __gu_lo | (__gu_hi << 32); break;				\
		default: __ret = __get_user_unaligned_unknown();			\
	}										\
	__ret;										\
})

#ifdef ASM_SUPPORTED
  struct __large_struct { unsigned long buf[100]; };
# define __m(x) (*(struct __large_struct __user *)(x))

/* We need to declare the __ex_table section before we can use it in .xdata. */
asm (".section \"__ex_table\", \"a\"\n\t.previous");

# define __get_user_size(val, addr, n, err)							\
do {												\
	register long __gu_r8 asm ("r8") = 0;							\
	register long __gu_r9 asm ("r9");							\
	asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
	     "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"						\
	     "[1:]"										\
	     : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));			\
	(err) = __gu_r8;									\
	(val) = __gu_r9;									\
} while (0)
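
/*
 * The "+4" in the continuation entry above sets bit 2 of the cont field
 * (see struct exception_table_entry at the end of this file), telling
 * the fault handler to clear r9, the value register, in addition to
 * setting r8.  Stores have no value register, so __put_user_size()
 * below emits plain "1f-.".
 */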

/*
 * The "__put_user_size()" macro tells gcc that it reads from memory rather than writing
 * to it.  This is safe because the macro does not write to any memory gcc knows about,
 * so there are no aliasing issues.
 */
# define __put_user_size(val, addr, n, err)							 \
do {												 \
	register long __pu_r8 asm ("r8") = 0;							 \
	asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
		      "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"					 \
		      "[1:]"									 \
		      : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8));		 \
	(err) = __pu_r8;									 \
} while (0)

#else /* !ASM_SUPPORTED */
# define RELOC_TYPE	2	/* ip-rel */
# define __get_user_size(val, addr, n, err)				\
do {									\
	__ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);	\
	(err) = ia64_getreg(_IA64_REG_R8);				\
	(val) = ia64_getreg(_IA64_REG_R9);				\
} while (0)
# define __put_user_size(val, addr, n, err)				\
do {									\
	__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE,	\
		  (__force unsigned long) (val));			\
	(err) = ia64_getreg(_IA64_REG_R8);				\
} while (0)
#endif /* !ASM_SUPPORTED */

extern void __get_user_unknown (void);

/*
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
 * could clobber r8 and r9 (among others).  Thus, be careful not to evaluate them while
 * using r8/r9.
 */
#define __do_get_user(check, x, ptr, size, segment)						\
({												\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);					\
	__typeof__ (size) __gu_size = (size);							\
	long __gu_err = -EFAULT;								\
	unsigned long __gu_val = 0;								\
	if (!check || __access_ok(__gu_ptr, __gu_size, segment))				\
		switch (__gu_size) {								\
			case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;	\
			case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;	\
			case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break;	\
			case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break;	\
			default: __get_user_unknown(); break;					\
		}										\
	(x) = (__force __typeof__(*(__gu_ptr))) __gu_val;					\
	__gu_err;										\
})

#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size, KERNEL_DS)
#define __get_user_check(x, ptr, size, segment)	__do_get_user(1, x, ptr, size, segment)

extern void __put_user_unknown (void);

/*
 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
 * could clobber r8 (among others).  Thus, be careful not to evaluate them while using r8.
 */
#define __do_put_user(check, x, ptr, size, segment)						\
({												\
	__typeof__ (x) __pu_x = (x);								\
	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr);						\
	__typeof__ (size) __pu_size = (size);							\
	long __pu_err = -EFAULT;								\
												\
	if (!check || __access_ok(__pu_ptr, __pu_size, segment))				\
		switch (__pu_size) {								\
			case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;		\
			case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;		\
			case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break;		\
			case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break;		\
			default: __put_user_unknown(); break;					\
		}										\
	__pu_err;										\
})

#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size, KERNEL_DS)
#define __put_user_check(x, ptr, size, segment)	__do_put_user(1, x, ptr, size, segment)

/*
 * Complex access routines
 */
extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
					       unsigned long count);

static inline unsigned long
__copy_to_user (void __user *to, const void *from, unsigned long count)
{
	return __copy_user(to, (__force void __user *) from, count);
}

static inline unsigned long
__copy_from_user (void *to, const void __user *from, unsigned long count)
{
	return __copy_user((__force void __user *) to, from, count);
}

#define __copy_to_user_inatomic		__copy_to_user
#define __copy_from_user_inatomic	__copy_from_user
#define copy_to_user(to, from, n)							\
({											\
	void __user *__cu_to = (to);							\
	const void *__cu_from = (from);							\
	long __cu_len = (n);								\
											\
	if (__access_ok(__cu_to, __cu_len, get_fs()))					\
		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
	__cu_len;									\
})

#define copy_from_user(to, from, n)							\
({											\
	void *__cu_to = (to);								\
	const void __user *__cu_from = (from);						\
	long __cu_len = (n);								\
											\
	__chk_user_ptr(__cu_from);							\
	if (__access_ok(__cu_from, __cu_len, get_fs()))					\
		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
	__cu_len;									\
})
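
/*
 * Both routines return the number of bytes that could NOT be copied
 * (zero on success), so a caller typically does (ubuf/kstat are
 * hypothetical):
 *
 *	if (copy_to_user(ubuf, &kstat, sizeof(kstat)))
 *		return -EFAULT;
 */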

#define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))

static inline unsigned long
copy_in_user (void __user *to, const void __user *from, unsigned long n)
{
	if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
		n = __copy_user(to, from, n);
	return n;
}

extern unsigned long __do_clear_user (void __user *, unsigned long);

#define __clear_user(to, n)	__do_clear_user(to, n)

#define clear_user(to, n)					\
({								\
	unsigned long __cu_len = (n);				\
	if (__access_ok(to, __cu_len, get_fs()))		\
		__cu_len = __do_clear_user(to, __cu_len);	\
	__cu_len;						\
})


/*
 * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
 * strlen.
 */
extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);

#define strncpy_from_user(to, from, n)					\
({									\
	const char __user *__sfu_from = (from);				\
	long __sfu_ret = -EFAULT;					\
	if (__access_ok(__sfu_from, 0, get_fs()))			\
		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
	__sfu_ret;							\
})
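
/*
 * Given the return convention above, a caller can detect truncation by
 * comparing against the buffer size (name/uname are hypothetical):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;		// -EFAULT
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	// no NUL terminator within the buffer
 */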

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern unsigned long __strlen_user (const char __user *);

#define strlen_user(str)				\
({							\
	const char __user *__su_str = (str);		\
	unsigned long __su_ret = 0;			\
	if (__access_ok(__su_str, 0, get_fs()))		\
		__su_ret = __strlen_user(__su_str);	\
	__su_ret;					\
})

/*
 * Returns: 0 if exception before NUL or reaching the supplied limit
 * (N), a value greater than N if the limit would be exceeded, else
 * strlen.
 */
extern unsigned long __strnlen_user (const char __user *, long);

#define strnlen_user(str, len)					\
({								\
	const char __user *__su_str = (str);			\
	unsigned long __su_ret = 0;				\
	if (__access_ok(__su_str, 0, get_fs()))			\
		__su_ret = __strnlen_user(__su_str, len);	\
	__su_ret;						\
})

/* Generic code can't deal with the location-relative format that we use for compactness. */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

struct exception_table_entry {
	int addr;	/* location-relative address of insn this fixup is for */
	int cont;	/* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
};

extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
extern const struct exception_table_entry *search_exception_tables (unsigned long addr);

static inline int
ia64_done_with_exception (struct pt_regs *regs)
{
	const struct exception_table_entry *e;
	e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
	if (e) {
		ia64_handle_exception(regs, e);
		return 1;
	}
	return 0;
}

#define ARCH_HAS_TRANSLATE_MEM_PTR	1
static __inline__ void *
xlate_dev_mem_ptr(phys_addr_t p)
{
	struct page *page;
	void *ptr;

	page = pfn_to_page(p >> PAGE_SHIFT);
	if (PageUncached(page))
		ptr = (void *)p + __IA64_UNCACHED_OFFSET;
	else
		ptr = __va(p);

	return ptr;
}

/*
 * Convert a virtual cached kernel memory pointer to an uncached pointer
 */
static __inline__ void *
xlate_dev_kmem_ptr(void *p)
{
	struct page *page;
	void *ptr;

	page = virt_to_page((unsigned long)p);
	if (PageUncached(page))
		ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET;
	else
		ptr = p;

	return ptr;
}

#endif /* _ASM_IA64_UACCESS_H */