#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <linux/lockdep.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles faults on both the source and destination, but does not check access_ok() */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);
	int ret = -EFAULT;

	might_fault();
	if (likely(sz == -1 || sz >= n))
		ret = _copy_from_user(to, from, n);
#ifdef CONFIG_DEBUG_VM
	else
		WARN(1, "Buffer overflow detected!\n");
#endif
	return ret;
}
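
/*
 * Illustrative usage (not part of this header; names are hypothetical):
 * callers treat any non-zero return from copy_from_user() as a failed
 * copy.
 *
 *	struct my_args args;
 *
 *	if (copy_from_user(&args, user_ptr, sizeof(args)))
 *		return -EFAULT;
 */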

static __always_inline __must_check
int copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();

	return _copy_to_user(dst, src, size);
}
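
/*
 * Illustrative usage (hypothetical names): like copy_from_user(),
 * copy_to_user() returns the number of bytes that could NOT be copied,
 * so zero means success.
 *
 *	if (copy_to_user(user_ptr, &result, sizeof(result)))
 *		return -EFAULT;
 */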

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
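
/*
 * A sketch of why the switch above exists (illustrative, hypothetical
 * names): when the size is a compile-time constant of 1, 2, 4, or 8
 * bytes, the copy collapses to a single inlined mov with an exception
 * fixup instead of a call to copy_user_generic().  The caller must
 * already have validated the pointer with access_ok().
 *
 *	u32 val;
 *
 *	if (__copy_from_user(&val, user_ptr, sizeof(val)))
 *		return -EFAULT;
 */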

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(*(u16 *)(8 + (char *)src),
			       (u16 __user *)(8 + (char __user *)dst),
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(*(u64 *)(8 + (char *)src),
			       (u64 __user *)(8 + (char __user *)dst),
			       ret, "q", "", "er", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
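
/*
 * Editorial note: the 10- and 16-byte cases split the copy into an
 * 8-byte store followed by a 2- or 8-byte store; the asm("":::"memory")
 * between them is a compiler barrier that keeps the two halves from
 * being reordered or merged.  The 10-byte case presumably matches
 * descriptor-table operands such as struct desc_ptr.
 */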

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
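
/*
 * Illustrative usage (hypothetical names): __copy_in_user() moves data
 * between two user-space buffers without a kernel bounce buffer; small
 * constant sizes go through a register, everything else falls back to
 * copy_user_generic().  Both pointers must be validated first, using
 * the old (type, addr, size) form of access_ok() this kernel declares.
 *
 *	if (!access_ok(VERIFY_READ, usrc, len) ||
 *	    !access_ok(VERIFY_WRITE, udst, len))
 *		return -EFAULT;
 *	if (__copy_in_user(udst, usrc, len))
 *		return -EFAULT;
 */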

__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
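
/*
 * Illustrative usage (hypothetical names): clear_user() zeroes a user
 * buffer and, like the copy routines, returns the number of bytes it
 * could not clear.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */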

static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return copy_user_generic(dst, (__force const void *)src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
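
/*
 * Illustrative usage (hypothetical names): the _inatomic variants skip
 * might_fault() and so may be called with page faults disabled; a fault
 * then fails the copy instead of sleeping, and the caller retries with
 * a sleeping copy outside the atomic region.
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, usrc, size);
 *	pagefault_enable();
 *	if (ret)
 *		ret = fallback_sleeping_copy(dst, usrc, size);
 */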
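/*
 * Editorial note: __copy_user_nocache() copies with non-temporal stores
 * so a large copy does not evict the CPU cache; the zerorest flag
 * selects whether the tail of the destination is zeroed when a fault
 * cuts the copy short, as the two wrappers below show.
 */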
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}

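/*
 * Editorial note: copy_user_handle_tail() appears in the fixup paths of
 * the assembly copy routines; after a fault it retries the remaining
 * bytes one at a time (zeroing the rest of the destination when
 * zerorest is set) and returns how many bytes were left uncopied.
 */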
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);

#endif /* _ASM_X86_UACCESS_64_H */