/*
 * arch/s390/include/asm/uaccess.h
 * (imported from the Linux kernel source tree; git-blame web-scrape
 * navigation residue removed from this header block)
 */
1da177e4 1/*
1da177e4 2 * S390 version
a53c8fab 3 * Copyright IBM Corp. 1999, 2000
1da177e4
LT
4 * Author(s): Hartmut Penner (hp@de.ibm.com),
5 * Martin Schwidefsky (schwidefsky@de.ibm.com)
6 *
7 * Derived from "include/asm-i386/uaccess.h"
8 */
9#ifndef __S390_UACCESS_H
10#define __S390_UACCESS_H
11
12/*
13 * User space memory access functions
14 */
15#include <linux/sched.h>
16#include <linux/errno.h>
b5a882fc 17#include <asm/processor.h>
a0616cde 18#include <asm/ctl_reg.h>
1da177e4
LT
19
/* Legacy access_ok() direction arguments; the type is ignored on s390. */
#define VERIFY_READ 0
#define VERIFY_WRITE 1


/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

/* Wrap an address-space selector value in an mm_segment_t. */
#define MAKE_MM_SEG(a) ((mm_segment_t) { (a) })


#define KERNEL_DS MAKE_MM_SEG(0)
#define USER_DS MAKE_MM_SEG(1)

#define get_ds() (KERNEL_DS)
/* Current task's address-space limit, kept in the thread struct. */
#define get_fs() (current->thread.mm_segment)

/* Two mm_segment_t values are equal iff their ar4 members match. */
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
41
b5a882fc
HC
/*
 * Switch the current task's address-space limit and install the matching
 * address-space control element (ASCE) into control register 7.
 * For KERNEL_DS the kernel ASCE is loaded and CIF_ASCE_SECONDARY is set;
 * for USER_DS the user ASCE is loaded and the flag is cleared.
 * NOTE(review): CR7 is presumably the secondary-space ASCE (flag name
 * suggests so) — confirm against the architecture definition.
 */
static inline void set_fs(mm_segment_t fs)
{
	current->thread.mm_segment = fs;
	if (segment_eq(fs, KERNEL_DS)) {
		set_cpu_flag(CIF_ASCE_SECONDARY);
		__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	} else {
		clear_cpu_flag(CIF_ASCE_SECONDARY);
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	}
}
53
491af990
HC
/*
 * Always reports success: kernel and user memory use distinct ASCEs on
 * s390 (see the separate kernel_asce/user_asce loads in set_fs()), so
 * no numeric range check is meaningful here; faulting accesses are
 * handled via the exception tables instead.
 */
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}

#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

/* The VERIFY_READ/VERIFY_WRITE type argument is ignored. */
#define access_ok(type, addr, size) __access_ok(addr, size)
1da177e4 66
1da177e4
LT
/*
 * An exception table entry pairs an instruction that is allowed to
 * fault with the fixup code to branch to when it does. Both members
 * are stored as signed 32-bit offsets relative to their own location
 * (ARCH_HAS_RELATIVE_EXTABLE) rather than as absolute addresses.
 * No registers are modified on the fixup path; the continuation code
 * must sort everything out itself. Keeping the fixup code out of line
 * means the fast path never jumps over it and it does not occupy
 * cache or TLB entries.
 */

struct exception_table_entry
{
	int insn, fixup;
};

/* Turn the relative fixup offset of @x into an absolute address. */
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	unsigned long anchor = (unsigned long)&x->fixup;

	return anchor + x->fixup;
}

#define ARCH_HAS_RELATIVE_EXTABLE
eb608fb3 91
4f41c2b4
HC
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
					    unsigned long n);

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
					  unsigned long n);

/* The atomic-context variants are plain aliases on s390. */
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
6c1e3e79 133
c9ca7841
HC
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

/*
 * Transfer @size bytes between @from and @to with a single MVCOS
 * (move with optional specifications) instruction. @spec is placed in
 * register 0 and encodes the operand access controls — presumably
 * which operand lives in the user address space (callers pass
 * 0x810000UL for stores and 0x81UL for loads); confirm against the
 * MVCOS instruction definition.
 *
 * Evaluates to 0 on success. On a fault, the exception table entries
 * for labels 0: and 1: divert to the out-of-line fixup at 3:, which
 * loads -EFAULT into the result and resumes at 2:.
 */
#define __put_get_user_asm(to, from, size, spec)		\
({								\
	register unsigned long __reg0 asm("0") = spec;		\
	int __rc;						\
								\
	asm volatile(						\
		"0:	mvcos	%1,%3,%2\n"			\
		"1:	xr	%0,%0\n"			\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3:	lhi	%0,%5\n"			\
		"	jg	2b\n"				\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: "=d" (__rc), "+Q" (*(to))			\
		: "d" (size), "Q" (*(from)),			\
		  "d" (__reg0), "K" (-EFAULT)			\
		: "cc");					\
	__rc;							\
})
156
dc4aace1
HC
/*
 * Store a 1/2/4/8-byte value from kernel address @x to user address
 * @ptr via MVCOS. Returns 0 on success or -EFAULT on fault.
 *
 * NOTE(review): @rc is left uninitialized for any other @size; all
 * callers (__put_user) guarantee size is 1, 2, 4 or 8 via their own
 * switch, so the uninitialized path is unreachable in practice.
 */
static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x810000UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char __user *)ptr,
					(unsigned char *)x,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short __user *)ptr,
					(unsigned short *)x,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int __user *)ptr,
					(unsigned int *)x,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long __user *)ptr,
					(unsigned long *)x,
					size, spec);
		break;
	}
	return rc;
}
186
/*
 * Load a 1/2/4/8-byte value from user address @ptr to kernel address
 * @x via MVCOS (spec 0x81UL — the mirror of __put_user_fn's spec).
 * Returns 0 on success or -EFAULT on fault.
 *
 * NOTE(review): as in __put_user_fn, @rc is uninitialized for other
 * sizes; callers guarantee a supported size.
 */
static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x81UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char *)x,
					(unsigned char __user *)ptr,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short *)x,
					(unsigned short __user *)ptr,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int *)x,
					(unsigned int __user *)ptr,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long *)x,
					(unsigned long __user *)ptr,
					size, spec);
		break;
	}
	return rc;
}
c9ca7841
HC
216
#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */

/*
 * Pre-z10 fallback without MVCOS: route single-value stores through
 * the generic __copy_to_user() path. A partial copy (non-zero return)
 * is collapsed to -EFAULT.
 */
static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : 0;
}

/*
 * Pre-z10 fallback: single-value loads via __copy_from_user(); any
 * uncopied remainder becomes -EFAULT.
 */
static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : 0;
}

#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
232
1da177e4
LT
/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */

/*
 * __put_user() - store one value to user space, without access_ok().
 * @x is copied into a local first so it is evaluated exactly once.
 * Evaluates to 0 on success or -EFAULT; an unsupported object size
 * calls the never-defined __put_user_bad() and so fails at link time.
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __x = (x);					\
	int __pu_err = -EFAULT;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		__pu_err = __put_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		break;							\
	default:							\
		__put_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__pu_err, 0);					\
})
1da177e4
LT
256
/* Checked variant of __put_user(): may sleep to fault in user pages. */
#define put_user(x, ptr)						\
({									\
	might_fault();							\
	__put_user(x, ptr);						\
})


/* Never defined: referencing it makes unsupported sizes a link error. */
int __put_user_bad(void) __attribute__((noreturn));
1da177e4 265
1da177e4
LT
/*
 * __get_user() - load one value from user space, without access_ok().
 * The value is read into a zero-initialized local of the matching
 * unsigned width — so on a fault (x) still receives a well-defined
 * zeroed value — then cast back to the target type via __force.
 * Evaluates to 0 on success or -EFAULT; an unsupported object size
 * fails at link time through __get_user_bad().
 */
#define __get_user(x, ptr)						\
({									\
	int __gu_err = -EFAULT;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		unsigned char __x = 0;					\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	};								\
	case 2: {							\
		unsigned short __x = 0;					\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	};								\
	case 4: {							\
		unsigned int __x = 0;					\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	};								\
	case 8: {							\
		unsigned long long __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,			\
					 sizeof(*(ptr)));		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	};								\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	__builtin_expect(__gu_err, 0);					\
})
1da177e4
LT
305
/* Checked variant of __get_user(): may sleep to fault in user pages. */
#define get_user(x, ptr)						\
({									\
	might_fault();							\
	__get_user(x, ptr);						\
})

/* Never defined: referencing it makes unsupported sizes a link error. */
int __get_user_bad(void) __attribute__((noreturn));

/* s390 handles unaligned single-value accesses with the normal paths. */
#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user
316
0d025d27
JP
/* Declared with __compiletime_error: any reachable call breaks the build. */
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

/* Runtime warning when a copy exceeds the destination object's size. */
static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
324
1da177e4
LT
/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user(to, from, n);
}
345
1da177e4
LT
/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	/* -1 means the compiler could not determine the object size. */
	unsigned int sz = __compiletime_object_size(to);

	might_fault();
	/*
	 * Hardened usercopy: if the destination object's size is known
	 * and smaller than @n, refuse to copy — a compile-time error
	 * for constant @n, a runtime warning otherwise. Returning @n
	 * reports "nothing copied" to the caller.
	 */
	if (unlikely(sz != -1 && sz < n)) {
		if (!__builtin_constant_p(n))
			copy_user_overflow(sz, n);
		else
			__bad_copy_user();
		return n;
	}
	return __copy_from_user(to, from, n);
}
378
4f41c2b4
HC
unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

/*
 * Copy between two user-space buffers; may sleep to fault in pages.
 * Returns the number of bytes that could not be copied.
 */
static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user(to, from, n);
}
388
/*
 * Copy a null terminated string from userspace.
 */

long __strncpy_from_user(char *dst, const char __user *src, long count);

/*
 * Bounded copy of a NUL-terminated string from user space; may sleep
 * to fault in pages. Thin wrapper over __strncpy_from_user().
 */
static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_fault();
	return __strncpy_from_user(dst, src, count);
}
401
unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

/*
 * Length of a user-space string, bounded by @n; may sleep to fault in
 * pages. Thin wrapper over __strnlen_user().
 */
static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
	might_fault();
	return __strnlen_user(src, n);
}
409
/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)
426
/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

/*
 * Zero @n bytes of user memory; may sleep to fault in pages.
 * Returns the number of bytes that could not be cleared.
 */
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}

/*
 * NOTE(review): "real" presumably refers to a real (untranslated)
 * destination address, and s390_kernel_write to writing otherwise
 * protected kernel memory — confirm at the definition sites.
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count);
void s390_kernel_write(void *dst, const void *src, size_t size);
a0616cde 440
1da177e4 441#endif /* __S390_UACCESS_H */