#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/extable_64.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS	((mm_segment_t) { ASI_P })
#define USER_DS		((mm_segment_t) { ASI_AIUS })	/* har har har */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)

#define segment_eq(a, b)  ((a).seg == (b).seg)

#define set_fs(val)							\
do {									\
	current_thread_info()->current_ds = (val).seg;			\
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
} while(0)
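
/*
 * Usage sketch (illustrative, not part of this header): the classic
 * save/restore pattern for callers that must point the user-access
 * routines at kernel memory.  kernel_read_word() is hypothetical;
 * only get_fs()/set_fs() and KERNEL_DS come from the definitions
 * above.  The old segment must always be restored on every exit path.
 *
 *	static int kernel_read_word(unsigned long *kaddr, unsigned long *res)
 *	{
 *		mm_segment_t old_fs = get_fs();
 *		int ret;
 *
 *		set_fs(KERNEL_DS);
 *		ret = get_user(*res, (unsigned long __user *)kaddr);
 *		set_fs(old_fs);
 *		return ret;
 *	}
 */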

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})
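
/*
 * Worked example of the wraparound test above: with addr = -8UL (eight
 * bytes below the top of the address space) and a non-constant size of
 * 16, "addr += size" wraps to 8.  The "addr < size" check (8 < 16)
 * then rejects the range, which the final "addr > limit" comparison on
 * the wrapped sum would otherwise have wrongly accepted.
 */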

static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}

void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({					\
	unsigned long __pu_addr = (unsigned long)(ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
})

#define get_user(x, ptr) ({					\
	unsigned long __gu_addr = (unsigned long)(ptr);		\
	__chk_user_ptr(ptr);					\
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
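
/*
 * Usage sketch (illustrative, not part of this header): get_user() and
 * put_user() evaluate to 0 on success and -EFAULT on a faulting
 * access, with the access size picked from the pointer's type, so the
 * idiomatic caller just tests the return value.  uptr is a
 * hypothetical user pointer.
 *
 *	int __user *uptr;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */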

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	}							\
	__pu_ret;						\
})

#define __put_user_asm(x, size, addr, ret)			\
__asm__ __volatile__(						\
	"/* Put user asm, inline. */\n"				\
"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"			\
	"clr %0\n"						\
"2:\n\n\t"							\
	".section .fixup,#alloc,#execinstr\n\t"			\
	".align 4\n"						\
"3:\n\t"							\
	"sethi %%hi(2b), %0\n\t"				\
	"jmpl %0 + %%lo(2b), %%g0\n\t"				\
	" mov %3, %0\n\n\t"					\
	".previous\n\t"						\
	".section __ex_table,\"a\"\n\t"				\
	".align 4\n\t"						\
	".word 1b, 3b\n\t"					\
	".previous\n\n\t"					\
	: "=r" (ret) : "r" (x), "r" (__m(addr)),		\
	  "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({		\
	register int __gu_ret;					\
	register unsigned long __gu_val;			\
	switch (size) {						\
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \
	default:						\
		__gu_val = 0;					\
		__gu_ret = __get_user_bad();			\
		break;						\
	}							\
	data = (__force type) __gu_val;				\
	__gu_ret;						\
})

#define __get_user_asm(x, size, addr, ret)			\
__asm__ __volatile__(						\
	"/* Get user asm, inline. */\n"				\
"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"			\
	"clr %0\n"						\
"2:\n\n\t"							\
	".section .fixup,#alloc,#execinstr\n\t"			\
	".align 4\n"						\
"3:\n\t"							\
	"sethi %%hi(2b), %0\n\t"				\
	"clr %1\n\t"						\
	"jmpl %0 + %%lo(2b), %%g0\n\t"				\
	" mov %3, %0\n\n\t"					\
	".previous\n\t"						\
	".section __ex_table,\"a\"\n\t"				\
	".align 4\n\t"						\
	".word 1b, 3b\n\n\t"					\
	".previous\n\t"						\
	: "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
	  "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check ___copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	check_object_size(to, size, false);

	return ___copy_from_user(to, from, size);
}
#define __copy_from_user copy_from_user

unsigned long __must_check ___copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	check_object_size(from, size, true);

	return ___copy_to_user(to, from, size);
}
#define __copy_to_user copy_to_user
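
/*
 * Usage sketch (illustrative, not part of this header): the copy
 * helpers return the number of bytes left uncopied, so zero means
 * complete success and any nonzero result is treated as a fault.
 * struct foo and ubuf are hypothetical.
 *
 *	struct foo kbuf;
 *
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *	if (copy_to_user(ubuf, &kbuf, sizeof(kbuf)))
 *		return -EFAULT;
 */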

unsigned long __must_check ___copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	return ___copy_in_user(to, from, size);
}
#define __copy_in_user copy_in_user

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */