#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS ((mm_segment_t) { ASI_P })
#define USER_DS ((mm_segment_t) { ASI_AIUS })	/* har har har */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)

#define segment_eq(a, b) ((a).seg == (b).seg)

#define set_fs(val) \
do { \
	current_thread_info()->current_ds = (val).seg; \
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
} while(0)
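
/*
 * Illustrative sketch (not part of this header): code that needs a
 * routine expecting user pointers to accept a kernel buffer
 * historically switched segments around the call and restored the old
 * segment afterwards.  "kernel_read_routine" is a hypothetical callee:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = kernel_read_routine(kbuf, len);
 *	set_fs(old_fs);
 */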

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit) \
({ \
	__chk_user_ptr(addr); \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}
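
/*
 * On sparc64 these checks always succeed: user accesses go through the
 * secondary ASI into a separate address space, so a bad pointer simply
 * faults at access time and is recovered via the exception table below.
 * Portable callers still make the check; a minimal sketch, assuming a
 * hypothetical "ubuf"/"len" pair:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 */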

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned int insn, fixup;
};
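
/*
 * A sketch of how the inline assembly below fills this table: for a
 * faulting instruction at local label 1 and its fixup code at label 3,
 * it emits one { insn, fixup } pair:
 *
 *	.section __ex_table,"a"
 *	.align	4
 *	.word	1b, 3b
 *	.previous
 */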

void __ret_efault(void);
void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
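
/*
 * Typical usage (illustrative sketch; "uaddr" is a hypothetical
 * __user pointer, and the access size is inferred from the pointer
 * type):
 *
 *	int val, err;
 *
 *	err = get_user(val, (int __user *)uaddr);
 *	if (!err)
 *		err = put_user(val + 1, (int __user *)uaddr);
 *
 * err is 0 on success and -EFAULT if the access faulted.
 */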

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({ \
	register int __pu_ret; \
	switch (size) { \
	case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
	case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
	case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
	case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
	default: __pu_ret = __put_user_bad(); break; \
	} \
	__pu_ret; \
})

#define __put_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
	"/* Put user asm, inline. */\n" \
	"1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
	"clr %0\n" \
	"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
	"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\t" \
	".previous\n\n\t" \
	: "=r" (ret) : "r" (x), "r" (__m(addr)), \
	  "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	switch (size) { \
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \
	default: \
		__gu_val = 0; \
		__gu_ret = __get_user_bad(); \
		break; \
	} \
	data = (__force type) __gu_val; \
	__gu_ret; \
})

#define __get_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
	"/* Get user asm, inline. */\n" \
	"1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \
	"clr %0\n" \
	"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
	"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"clr %1\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\n\t" \
	".previous\n\t" \
	: "=r" (ret), "=r" (x) : "r" (__m(addr)), \
	  "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check ___copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);
unsigned long copy_from_user_fixup(void *to, const void __user *from,
				   unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long ret;

	check_object_size(to, size, false);

	ret = ___copy_from_user(to, from, size);
	if (unlikely(ret))
		ret = copy_from_user_fixup(to, from, size);

	return ret;
}
#define __copy_from_user copy_from_user
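
/*
 * The return value is the number of bytes that could NOT be copied, so
 * callers normally turn any nonzero result into -EFAULT.  A minimal
 * sketch, assuming a hypothetical "struct foo" and user pointer "uarg":
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */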

unsigned long __must_check ___copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
unsigned long copy_to_user_fixup(void __user *to, const void *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret;

	check_object_size(from, size, true);

	ret = ___copy_to_user(to, from, size);
	if (unlikely(ret))
		ret = copy_to_user_fixup(to, from, size);
	return ret;
}
#define __copy_to_user copy_to_user
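
/*
 * The mirror image for writes, with the same bytes-not-copied return
 * convention (illustrative sketch, same hypothetical names as above):
 *
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */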

unsigned long __must_check ___copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);
unsigned long copy_in_user_fixup(void __user *to, void __user *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (unlikely(ret))
		ret = copy_in_user_fixup(to, from, size);
	return ret;
}
#define __copy_in_user copy_in_user

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);
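
/*
 * Illustrative sketch, assuming hypothetical user pointers "ubuf" and
 * "ustr": clear_user() returns the number of bytes not cleared, and
 * strnlen_user() returns the string length including the trailing NUL,
 * or 0 on fault:
 *
 *	long n;
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 *	n = strnlen_user(ustr, PATH_MAX);
 *	if (!n)
 *		return -EFAULT;
 */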

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */