/* $Id: uaccess.h,v 1.35 2002/02/09 19:49:31 davem Exp $ */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <asm/a.out.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess.h>
#endif

#ifndef __ASSEMBLY__

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, so there is zero chance of the user
 * doing something strange and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erm, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS ((mm_segment_t) { ASI_P })
#define USER_DS ((mm_segment_t) { ASI_AIUS }) /* har har har */

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define get_fs() ((mm_segment_t) { get_thread_current_ds() })
#define get_ds() (KERNEL_DS)

#define segment_eq(a,b) ((a).seg == (b).seg)

#define set_fs(val) \
do { \
	set_thread_current_ds((val).seg); \
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
} while(0)
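
/*
 * Illustrative usage of the get_fs()/set_fs() pair above (a sketch, not
 * part of this header): code that wants the user-access routines to
 * operate on kernel memory temporarily switches the "segment" and must
 * restore it afterwards.  The names old_fs, err, val and kbuf are
 * hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = get_user(val, (int __user *)kbuf);
 *	set_fs(old_fs);
 */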

static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 *
 * There is also a special way to mark a range of potentially faulting
 * insns (such as twenty ldd/std's with no other instructions in
 * between). You specify the address of the first insn and 0 as the
 * fixup, and in the next exception_table_entry you specify the last
 * potentially faulting insn + 1 and, as the fixup, the routine which
 * should handle the fault. That fixup code will get
 * (faulting_insn_address - first_insn_in_the_range_address)/4
 * in %g2 (i.e. the index of the faulting instruction in the range).
 */

struct exception_table_entry
{
	unsigned insn, fixup;
};

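/*
 * Sketch of a range entry as described above (illustrative, not taken
 * from real fixup code): the first entry names the first insn in the
 * range with a fixup of 0, the second names the last insn + 1 with the
 * real fixup label; the handler then receives the insn index in %g2.
 *
 *	.section __ex_table,#alloc
 *	.align 4
 *	.word 1b, 0
 *	.word 2b, 3b
 *	.previous
 */
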
/* Special extable search, which handles ranges. Returns the fixup. */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2);

extern void __ret_efault(void);

/* Uh, these should become the main single-value transfer routines...
 * They automatically use the right size if we just have the right
 * pointer type...
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x,ptr) ({ \
unsigned long __pu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })

#define get_user(x,ptr) ({ \
unsigned long __gu_addr = (unsigned long)(ptr); \
__chk_user_ptr(ptr); \
__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })

#define __put_user(x,ptr) put_user(x,ptr)
#define __get_user(x,ptr) get_user(x,ptr)
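
/*
 * Example use (illustrative only; uaddr is a hypothetical user
 * pointer): both macros evaluate to 0 on success and -EFAULT on a
 * faulting access, and derive the transfer size from the pointer type.
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)uaddr))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, (int __user *)uaddr))
 *		return -EFAULT;
 */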

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data,addr,size) ({ \
register int __pu_ret; \
switch (size) { \
case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
default: __pu_ret = __put_user_bad(); break; \
} __pu_ret; })

#define __put_user_nocheck_ret(data,addr,size,retval) ({ \
register int __foo __asm__ ("l1"); \
switch (size) { \
case 1: __put_user_asm_ret(data,b,addr,retval,__foo); break; \
case 2: __put_user_asm_ret(data,h,addr,retval,__foo); break; \
case 4: __put_user_asm_ret(data,w,addr,retval,__foo); break; \
case 8: __put_user_asm_ret(data,x,addr,retval,__foo); break; \
default: if (__put_user_bad()) return retval; break; \
} })

#define __put_user_asm(x,size,addr,ret) \
__asm__ __volatile__( \
	"/* Put user asm, inline. */\n" \
"1:\t"	"st"#size "a %1, [%2] %%asi\n\t" \
	"clr %0\n" \
"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
"3:\n\t" \
	"b 2b\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,#alloc\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\t" \
	".previous\n\n\t" \
       : "=r" (ret) : "r" (x), "r" (__m(addr)), \
	 "i" (-EFAULT))

#define __put_user_asm_ret(x,size,addr,ret,foo) \
if (__builtin_constant_p(ret) && ret == -EFAULT) \
__asm__ __volatile__( \
	"/* Put user asm ret, inline. */\n" \
"1:\t"	"st"#size "a %1, [%2] %%asi\n\n\t" \
	".section __ex_table,#alloc\n\t" \
	".align 4\n\t" \
	".word 1b, __ret_efault\n\n\t" \
	".previous\n\n\t" \
       : "=r" (foo) : "r" (x), "r" (__m(addr))); \
else \
__asm__ __volatile__( \
	"/* Put user asm ret, inline. */\n" \
"1:\t"	"st"#size "a %1, [%2] %%asi\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
"3:\n\t" \
	"ret\n\t" \
	" restore %%g0, %3, %%o0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,#alloc\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\n\t" \
	".previous\n\n\t" \
       : "=r" (foo) : "r" (x), "r" (__m(addr)), \
	 "i" (ret))

extern int __put_user_bad(void);

#define __get_user_nocheck(data,addr,size,type) ({ \
register int __gu_ret; \
register unsigned long __gu_val; \
switch (size) { \
case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
} data = (type) __gu_val; __gu_ret; })

#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
register unsigned long __gu_val __asm__ ("l1"); \
switch (size) { \
case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
default: if (__get_user_bad()) return retval; \
} data = (type) __gu_val; })

#define __get_user_asm(x,size,addr,ret) \
__asm__ __volatile__( \
	"/* Get user asm, inline. */\n" \
"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t" \
	"clr %0\n" \
"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
"3:\n\t" \
	"clr %1\n\t" \
	"b 2b\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,#alloc\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\n\t" \
	".previous\n\t" \
       : "=r" (ret), "=r" (x) : "r" (__m(addr)), \
	 "i" (-EFAULT))

#define __get_user_asm_ret(x,size,addr,retval) \
if (__builtin_constant_p(retval) && retval == -EFAULT) \
__asm__ __volatile__( \
	"/* Get user asm ret, inline. */\n" \
"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t" \
	".section __ex_table,#alloc\n\t" \
	".align 4\n\t" \
	".word 1b, __ret_efault\n\n\t" \
	".previous\n\t" \
       : "=r" (x) : "r" (__m(addr))); \
else \
__asm__ __volatile__( \
	"/* Get user asm ret, inline. */\n" \
"1:\t"	"ld"#size "a [%1] %%asi, %0\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
"3:\n\t" \
	"ret\n\t" \
	" restore %%g0, %2, %%o0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,#alloc\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\n\t" \
	".previous\n\t" \
       : "=r" (x) : "r" (__m(addr)), "i" (retval))

extern int __get_user_bad(void);

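/*
 * Sketch of the _ret variants (illustrative): instead of yielding an
 * error code, __get_user_nocheck_ret()/__put_user_nocheck_ret() make
 * the enclosing function itself return retval when the access faults,
 * either via __ret_efault or the ret/restore fixup above.  The caller
 * some_reader() below is hypothetical.
 *
 *	int some_reader(int __user *uaddr)
 *	{
 *		int val;
 *
 *		__get_user_nocheck_ret(val, (unsigned long)uaddr,
 *				       sizeof(int), int, -EFAULT);
 *		return val;
 *	}
 */
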
extern unsigned long __must_check ___copy_from_user(void *to,
						    const void __user *from,
						    unsigned long size);
extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
					   unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_from_user(to, from, size);

	if (ret)
		ret = copy_from_user_fixup(to, from, size);
	return ret;
}
#define __copy_from_user copy_from_user
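
/*
 * Typical call pattern (illustrative): the copy routines return the
 * number of bytes that could NOT be copied, so zero means success.
 * kbuf, ubuf and len are hypothetical.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */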

extern unsigned long __must_check ___copy_to_user(void __user *to,
						  const void *from,
						  unsigned long size);
extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
					unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret = ___copy_to_user(to, from, size);

	if (ret)
		ret = copy_to_user_fixup(to, from, size);
	return ret;
}
#define __copy_to_user copy_to_user

extern unsigned long __must_check ___copy_in_user(void __user *to,
						  const void __user *from,
						  unsigned long size);
extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
					unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (ret)
		ret = copy_in_user_fixup(to, from, size);
	return ret;
}
#define __copy_in_user copy_in_user

extern unsigned long __must_check __bzero_noasi(void __user *, unsigned long);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long size)
{
	return __bzero_noasi(addr, size);
}

#define clear_user __clear_user
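
/*
 * clear_user() example (illustrative; ubuf and len are hypothetical):
 * like the copy routines, it returns the number of bytes left
 * uncleared, so zero means the whole range was zeroed.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */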

extern long __must_check __strncpy_from_user(char *dest, const char __user *src, long count);

#define strncpy_from_user __strncpy_from_user

extern long __strlen_user(const char __user *);
extern long __strnlen_user(const char __user *, long len);

#define strlen_user __strlen_user
#define strnlen_user __strnlen_user
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#endif /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */