/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h>
#include <asm/ctl_reg.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1


/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })


#define KERNEL_DS	MAKE_MM_SEG(0)
#define USER_DS		MAKE_MM_SEG(1)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.mm_segment)
#define segment_eq(a,b)	((a).ar4 == (b).ar4)

static inline void set_fs(mm_segment_t fs)
{
	current->thread.mm_segment = fs;
	if (segment_eq(fs, KERNEL_DS)) {
		set_cpu_flag(CIF_ASCE_SECONDARY);
		__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	} else {
		clear_cpu_flag(CIF_ASCE_SECONDARY);
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	}
}
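
/*
 * Illustrative sketch (not part of this header): the traditional caller
 * pattern for temporarily widening the address space so that a routine
 * written for user pointers can operate on a kernel buffer. The helper
 * name below is hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	rc = uaccess_helper(buf, len);		(hypothetical helper)
 *	set_fs(old_fs);
 */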

static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}

#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(type, addr, size) __access_ok(addr, size)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}

#define ARCH_HAS_RELATIVE_EXTABLE
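
/*
 * The table entries are relative: each field holds a signed offset from its
 * own address, which is what extable_fixup() resolves above. For reference,
 * a minimal sketch of the analogous resolution for the faulting instruction
 * address (assumed helper, not defined in this header):
 *
 *	static inline unsigned long extable_insn(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->insn + x->insn;
 *	}
 */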

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
					    unsigned long n);

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
					  unsigned long n);
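
/*
 * Usage sketch (hypothetical caller, not part of this header): the __
 * variants skip the access_ok() check, so the caller performs it itself.
 * The names uptr, kbuf and len are placeholders.
 *
 *	if (!access_ok(VERIFY_READ, uptr, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, uptr, len))
 *		return -EFAULT;
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, len))
 *		return -EFAULT;
 *	if (__copy_to_user(uptr, kbuf, len))
 *		return -EFAULT;
 */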

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

/*
 * Single-value transfer via the MVCOS instruction. Register 0 must hold
 * the operand-access specification ("spec") on entry; if the move faults,
 * the exception table fixup returns -EFAULT.
 */
#define __put_get_user_asm(to, from, size, spec)		\
({								\
	register unsigned long __reg0 asm("0") = spec;		\
	int __rc;						\
								\
	asm volatile(						\
		"0: mvcos %1,%3,%2\n"				\
		"1: xr %0,%0\n"					\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3: lhi %0,%5\n"				\
		"   jg 2b\n"					\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: "=d" (__rc), "=Q" (*(to))			\
		: "d" (size), "Q" (*(from)),			\
		  "d" (__reg0), "K" (-EFAULT)			\
		: "cc");					\
	__rc;							\
})

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x810000UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char __user *)ptr,
					(unsigned char *)x,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short __user *)ptr,
					(unsigned short *)x,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int __user *)ptr,
					(unsigned int *)x,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long __user *)ptr,
					(unsigned long *)x,
					size, spec);
		break;
	}
	return rc;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x81UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char *)x,
					(unsigned char __user *)ptr,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short *)x,
					(unsigned short __user *)ptr,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int *)x,
					(unsigned int __user *)ptr,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long *)x,
					(unsigned long __user *)ptr,
					size, spec);
		break;
	}
	return rc;
}

#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : 0;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : 0;
}

#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__pu_err, 0);				\
})

#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})


int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__gu_err, 0);				\
})

#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})

int __get_user_bad(void) __attribute__((noreturn));
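
/*
 * Usage sketch (hypothetical ioctl handler, not part of this header):
 * get_user()/put_user() transfer a single scalar and return 0 on success
 * or -EFAULT on a faulting access.
 *
 *	int __user *argp = (int __user *)arg;
 *	int val;
 *
 *	if (get_user(val, argp))
 *		return -EFAULT;
 *	val *= 2;
 *	if (put_user(val, argp))
 *		return -EFAULT;
 */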

#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user(to, from, n);
}

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned int sz = __compiletime_object_size(to);

	might_fault();
	if (unlikely(sz != -1 && sz < n)) {
		if (!__builtin_constant_p(n))
			copy_user_overflow(sz, n);
		else
			__bad_copy_user();
		return n;
	}
	return __copy_from_user(to, from, n);
}
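
/*
 * Usage sketch (hypothetical read-modify-write of a user buffer, not part
 * of this header): the checked variants add might_fault() and, for
 * copy_from_user(), a compile-time object-size check; a non-zero return
 * means not everything could be copied. The type and field below are
 * placeholders.
 *
 *	struct foo kbuf;
 *
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *	kbuf.flags |= FOO_UPDATED;
 *	if (copy_to_user(ubuf, &kbuf, sizeof(kbuf)))
 *		return -EFAULT;
 */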

unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user(to, from, n);
}

/*
 * Copy a null terminated string from userspace.
 */

long __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_fault();
	return __strncpy_from_user(dst, src, count);
}
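
/*
 * Usage sketch (hypothetical caller, not part of this header):
 * strncpy_from_user() returns a non-negative length on success and a
 * negative error code on a faulting access.
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 */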

unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
	might_fault();
	return __strnlen_user(src, n);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)
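
/*
 * Usage sketch (hypothetical caller, not part of this header): size a
 * user string before making a kernel copy. A return value of 0 indicates
 * a faulting access; a value above the limit means the string did not
 * fit. PATH_MAX is only an example bound.
 *
 *	unsigned long len;
 *
 *	len = strnlen_user(ustr, PATH_MAX);
 *	if (!len || len > PATH_MAX)
 *		return -EFAULT;
 */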

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}
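
/*
 * Usage sketch (hypothetical caller, not part of this header): zero the
 * uncopied tail of a user buffer; a non-zero return is the number of
 * bytes that could not be cleared.
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */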

int copy_to_user_real(void __user *dest, void *src, unsigned long count);
void s390_kernel_write(void *dst, const void *src, size_t size);

#endif /* __S390_UACCESS_H */