/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_UACCESS_H
#define _ASM_IA64_UACCESS_H

/*
 * This file defines various macros to transfer memory areas across
 * the user/kernel boundary. This needs to be done carefully because
 * this code is executed in kernel mode and uses user-specified
 * addresses. Thus, we need to be careful not to let the user
 * trick us into accessing kernel memory that would normally be
 * inaccessible. This code is also fairly performance sensitive,
 * so we want to spend as little time doing safety checks as
 * possible.
 *
 * To make matters a bit more interesting, these macros are sometimes
 * also called from within the kernel itself, in which case the address
 * validity check must be skipped. The get_fs() macro tells us what
 * to do: if get_fs()==USER_DS, checking is performed; if
 * get_fs()==KERNEL_DS, checking is bypassed.
 *
 * Note that even if the memory area specified by the user is in a
 * valid address range, it is still possible that we'll get a page
 * fault while accessing it. This is handled by filling out an
 * exception handler fixup entry for each instruction that has the
 * potential to fault. When such a fault occurs, the page fault
 * handler checks to see whether the faulting instruction has a fixup
 * associated and, if so, sets r8 to -EFAULT, clears r9 to 0, and
 * then resumes execution at the continuation point.
 *
 * Based on <asm-alpha/uaccess.h>.
 *
 * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/compiler.h>
#include <linux/page-flags.h>
#include <linux/mm.h>

#include <asm/intrinsics.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/extable.h>

/*
 * For historical reasons, the following macros are grossly misnamed:
 */
#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */

#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

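/*
 * Illustrative sketch (hypothetical helper, not part of this header's
 * API): the classic pattern for letting kernel code drive a
 * __user-pointer interface with a kernel buffer is to widen the
 * address limit and restore it afterwards.
 */
static inline void example_with_kernel_ds(void (*body)(void))
{
	mm_segment_t old_fs = get_fs();	/* remember the caller's limit */

	set_fs(KERNEL_DS);	/* every address now passes the check */
	body();			/* user-access routines may touch kernel memory */
	set_fs(old_fs);		/* always restore the previous limit */
}
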
/*
 * When accessing user memory, we need to make sure the entire area really is in
 * user-level space. In order to do this efficiently, we make sure that the page at
 * address TASK_SIZE is never valid. We also need to make sure that the address doesn't
 * point inside the virtually mapped linear page table.
 */
static inline int __access_ok(const void __user *p, unsigned long size)
{
	unsigned long addr = (unsigned long)p;
	unsigned long seg = get_fs().seg;
	return likely(addr <= seg) &&
		(seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
}
#define access_ok(addr, size)	__access_ok((addr), (size))
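
/*
 * Minimal sketch (hypothetical helper) of how callers use access_ok():
 * validate the whole range up front and fail with -EFAULT, exactly as
 * the checking get_user()/put_user() variants do internally.
 */
static inline int example_range_ok(const void __user *ubuf, unsigned long len)
{
	if (!access_ok(ubuf, len))
		return -EFAULT;	/* range is not entirely below the limit */
	return 0;
}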

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr)	__put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

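/*
 * Sketch of the pattern the comment above describes (hypothetical
 * helper): one explicit access_ok() check, then several unchecked
 * accesses to the same, already-validated area.
 */
static inline long example_read_pair(int *a, int *b, const int __user *uptr)
{
	if (!access_ok(uptr, 2 * sizeof(int)))
		return -EFAULT;		/* validate the whole area once */
	if (__get_user(*a, uptr))	/* unchecked: area already validated */
		return -EFAULT;		/* can still fault on a bad page */
	if (__get_user(*b, uptr + 1))
		return -EFAULT;
	return 0;
}
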
#ifdef ASM_SUPPORTED
  struct __large_struct { unsigned long buf[100]; };
# define __m(x) (*(struct __large_struct __user *)(x))

/* We need to declare the __ex_table section before we can use it in .xdata. */
asm (".section \"__ex_table\", \"a\"\n\t.previous");

# define __get_user_size(val, addr, n, err)							\
do {												\
	register long __gu_r8 asm ("r8") = 0;							\
	register long __gu_r9 asm ("r9");							\
	asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
	     "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n"						\
	     "[1:]"										\
	     : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8));			\
	(err) = __gu_r8;									\
	(val) = __gu_r9;									\
} while (0)

/*
 * The "__put_user_size()" macro tells gcc it reads from memory instead of writing it. This
 * is because it does not write to any memory gcc knows about, so there are no aliasing
 * issues.
 */
# define __put_user_size(val, addr, n, err)							\
do {												\
	register long __pu_r8 asm ("r8") = 0;							\
	asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
		      "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n"					\
		      "[1:]"									\
		      : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8));		\
	(err) = __pu_r8;									\
} while (0)
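
/*
 * A note on the .xdata4 entries above (a hedged summary; the
 * authoritative fixup handler lives in arch/ia64/mm/extable.c): each
 * entry is a pair of 32-bit self-relative offsets locating the
 * potentially faulting instruction and its continuation point. The
 * low bits of the continuation offset serve as flags, which appears
 * to be why the load case uses "1f-.+4": it asks the handler to also
 * clear r9, matching the behaviour described at the top of this file.
 */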

#else /* !ASM_SUPPORTED */
# define RELOC_TYPE	2	/* ip-rel */
# define __get_user_size(val, addr, n, err)				\
do {									\
	__ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE);	\
	(err) = ia64_getreg(_IA64_REG_R8);				\
	(val) = ia64_getreg(_IA64_REG_R9);				\
} while (0)
# define __put_user_size(val, addr, n, err)				\
do {									\
	__st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE,	\
		  (__force unsigned long) (val));			\
	(err) = ia64_getreg(_IA64_REG_R8);				\
} while (0)
#endif /* !ASM_SUPPORTED */

extern void __get_user_unknown (void);

/*
 * Evaluating arguments X, PTR, and SIZE may involve subroutine calls, which
 * could clobber r8 and r9 (among others). Thus, be careful not to evaluate
 * them while using r8/r9.
 */
#define __do_get_user(check, x, ptr, size)						\
({											\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);				\
	__typeof__ (size) __gu_size = (size);						\
	long __gu_err = -EFAULT;							\
	unsigned long __gu_val = 0;							\
	if (!check || __access_ok(__gu_ptr, __gu_size))					\
		switch (__gu_size) {							\
		case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break;	\
		case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break;	\
		case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break;	\
		case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break;	\
		default: __get_user_unknown(); break;					\
		}									\
	(x) = (__force __typeof__(*(__gu_ptr))) __gu_val;				\
	__gu_err;									\
})

#define __get_user_nocheck(x, ptr, size)	__do_get_user(0, x, ptr, size)
#define __get_user_check(x, ptr, size)		__do_get_user(1, x, ptr, size)

extern void __put_user_unknown (void);

/*
 * Evaluating arguments X, PTR, and SIZE may involve subroutine calls, which
 * could clobber r8 (among others). Thus, be careful not to evaluate them
 * while using r8.
 */
#define __do_put_user(check, x, ptr, size)					\
({										\
	__typeof__ (x) __pu_x = (x);						\
	__typeof__ (*(ptr)) __user *__pu_ptr = (ptr);				\
	__typeof__ (size) __pu_size = (size);					\
	long __pu_err = -EFAULT;						\
										\
	if (!check || __access_ok(__pu_ptr, __pu_size))				\
		switch (__pu_size) {						\
		case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break;	\
		case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break;	\
		case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break;	\
		case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break;	\
		default: __put_user_unknown(); break;				\
		}								\
	__pu_err;								\
})

#define __put_user_nocheck(x, ptr, size)	__do_put_user(0, x, ptr, size)
#define __put_user_check(x, ptr, size)		__do_put_user(1, x, ptr, size)

/*
 * Complex access routines
 */
extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
					       unsigned long count);

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long count)
{
	return __copy_user(to, (__force void __user *) from, count);
}

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long count)
{
	return __copy_user((__force void __user *) to, from, count);
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

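/*
 * Sketch (hypothetical helper): the raw copy routines return the
 * number of bytes that could NOT be copied, so zero means complete
 * success; the generic copy_from_user() in <linux/uaccess.h> is built
 * on top of them.
 */
static inline long example_fetch_blob(void *dst, const void __user *src,
				      unsigned long len)
{
	if (!access_ok(src, len))
		return -EFAULT;			/* range check first */
	if (raw_copy_from_user(dst, src, len))	/* nonzero => partial copy */
		return -EFAULT;
	return 0;
}
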
extern unsigned long __do_clear_user (void __user *, unsigned long);

#define __clear_user(to, n)	__do_clear_user(to, n)

#define clear_user(to, n)					\
({								\
	unsigned long __cu_len = (n);				\
	if (__access_ok(to, __cu_len))				\
		__cu_len = __do_clear_user(to, __cu_len);	\
	__cu_len;						\
})

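/*
 * Usage sketch (hypothetical helper): like the raw copy routines,
 * clear_user() returns the number of bytes that could NOT be zeroed,
 * so zero means complete success.
 */
static inline int example_zero_user(void __user *ubuf, unsigned long len)
{
	return clear_user(ubuf, len) ? -EFAULT : 0;
}
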
/*
 * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
 * strlen.
 */
extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);

#define strncpy_from_user(to, from, n)					\
({									\
	const char __user * __sfu_from = (from);			\
	long __sfu_ret = -EFAULT;					\
	if (__access_ok(__sfu_from, 0))					\
		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
	__sfu_ret;							\
})

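/*
 * Sketch (hypothetical helper) distinguishing the three documented
 * outcomes: fault, truncation (buffer filled with no NUL seen), and
 * success (the copied string's length).
 */
static inline long example_get_name(char *buf, const char __user *uname,
				    long buflen)
{
	long len = strncpy_from_user(buf, uname, buflen);

	if (len < 0)
		return len;		/* -EFAULT: faulted before the NUL */
	if (len == buflen)
		return -ENAMETOOLONG;	/* entire buffer filled, no NUL */
	return len;			/* strlen() of the copied string */
}
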
/*
 * Returns: 0 if exception before NUL or reaching the supplied limit
 * (N), a value greater than N if the limit would be exceeded, else
 * strlen.
 */
extern unsigned long __strnlen_user (const char __user *, long);

#define strnlen_user(str, len)					\
({								\
	const char __user *__su_str = (str);			\
	unsigned long __su_ret = 0;				\
	if (__access_ok(__su_str, 0))				\
		__su_ret = __strnlen_user(__su_str, len);	\
	__su_ret;						\
})

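/*
 * Sketch (hypothetical helper) mapping strnlen_user()'s documented
 * outcomes onto error codes.
 */
static inline long example_user_strlen(const char __user *ustr, long limit)
{
	unsigned long len = strnlen_user(ustr, limit);

	if (len == 0)
		return -EFAULT;		/* faulted before a NUL was found */
	if (len > limit)
		return -ENAMETOOLONG;	/* no NUL within the limit */
	return len;
}
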
#define ARCH_HAS_TRANSLATE_MEM_PTR	1
static __inline__ void *
xlate_dev_mem_ptr(phys_addr_t p)
{
	struct page *page;
	void *ptr;

	page = pfn_to_page(p >> PAGE_SHIFT);
	if (PageUncached(page))
		ptr = (void *)p + __IA64_UNCACHED_OFFSET;
	else
		ptr = __va(p);

	return ptr;
}

/*
 * Convert a virtual cached kernel memory pointer to an uncached pointer
 */
static __inline__ void *
xlate_dev_kmem_ptr(void *p)
{
	struct page *page;
	void *ptr;

	page = virt_to_page((unsigned long)p);
	if (PageUncached(page))
		ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET;
	else
		ptr = p;

	return ptr;
}

#endif /* _ASM_IA64_UACCESS_H */