arch/s390/include/asm/uaccess.h

/*
 * S390 version
 * Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com),
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/processor.h>
#include <asm/ctl_reg.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })

#define KERNEL_DS       MAKE_MM_SEG(0)
#define USER_DS         MAKE_MM_SEG(1)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.mm_segment)
#define segment_eq(a,b) ((a).ar4 == (b).ar4)

static inline void set_fs(mm_segment_t fs)
{
        current->thread.mm_segment = fs;
        if (uaccess_kernel()) {
                set_cpu_flag(CIF_ASCE_SECONDARY);
                __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        } else {
                clear_cpu_flag(CIF_ASCE_SECONDARY);
                __ctl_load(S390_lowcore.user_asce, 7, 7);
        }
}
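
/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * temporarily lifting the user/kernel address space distinction is to save
 * the current segment, switch to KERNEL_DS, and restore the old value when
 * done. some_uaccess_routine() is a made-up placeholder.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = some_uaccess_routine(kernel_buffer, len);
 *	set_fs(old_fs);
 */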

static inline int __range_ok(unsigned long addr, unsigned long size)
{
        return 1;
}

#define __access_ok(addr, size)                         \
({                                                      \
        __chk_user_ptr(addr);                           \
        __range_ok((unsigned long)(addr), (size));      \
})

#define access_ok(type, addr, size) __access_ok(addr, size)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        int insn, fixup;
};

static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
        return (unsigned long)&x->fixup + x->fixup;
}

#define ARCH_HAS_RELATIVE_EXTABLE

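/*
 * Example (assumed values, for illustration only): with a relative extable,
 * each field holds a signed 32-bit offset from its own address rather than
 * an absolute pointer. If an entry lives at 0x1000 and its fixup member
 * (at 0x1004) contains 0x2000, extable_fixup() resolves the continuation
 * address as:
 *
 *	(unsigned long)&x->fixup + x->fixup == 0x1004 + 0x2000 == 0x3004
 *
 * The faulting instruction address can be recovered the same way from the
 * insn member.
 */
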
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
                                            unsigned long n);

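/*
 * Usage sketch (illustrative only): with the double-underscore variant the
 * caller performs the access_ok() check itself; the type argument is
 * ignored by this implementation. kbuf, ubuf and len are placeholders.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */
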
/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
                                          unsigned long n);

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

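/*
 * Added note: MVCOS (MOVE WITH OPTIONAL SPECIFICATIONS) copies between
 * address spaces in a single instruction. General register 0 carries the
 * specification value passed in as "spec", which selects per operand
 * which address space is used, so the user-space side of the copy goes
 * through the secondary address space set up by set_fs() above (see the
 * 0x810000UL and 0x81UL values in __put_user_fn()/__get_user_fn() below).
 * The out-of-line fixup turns a faulting access into -EFAULT.
 */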
#define __put_get_user_asm(to, from, size, spec)                \
({                                                              \
        register unsigned long __reg0 asm("0") = spec;          \
        int __rc;                                               \
                                                                \
        asm volatile(                                           \
                "0: mvcos %1,%3,%2\n"                           \
                "1: xr    %0,%0\n"                              \
                "2:\n"                                          \
                ".pushsection .fixup, \"ax\"\n"                 \
                "3: lhi   %0,%5\n"                              \
                "   jg    2b\n"                                 \
                ".popsection\n"                                 \
                EX_TABLE(0b,3b) EX_TABLE(1b,3b)                 \
                : "=d" (__rc), "=Q" (*(to))                     \
                : "d" (size), "Q" (*(from)),                    \
                  "d" (__reg0), "K" (-EFAULT)                   \
                : "cc");                                        \
        __rc;                                                   \
})

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
        unsigned long spec = 0x810000UL;
        int rc;

        switch (size) {
        case 1:
                rc = __put_get_user_asm((unsigned char __user *)ptr,
                                        (unsigned char *)x,
                                        size, spec);
                break;
        case 2:
                rc = __put_get_user_asm((unsigned short __user *)ptr,
                                        (unsigned short *)x,
                                        size, spec);
                break;
        case 4:
                rc = __put_get_user_asm((unsigned int __user *)ptr,
                                        (unsigned int *)x,
                                        size, spec);
                break;
        case 8:
                rc = __put_get_user_asm((unsigned long __user *)ptr,
                                        (unsigned long *)x,
                                        size, spec);
                break;
        }
        return rc;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
        unsigned long spec = 0x81UL;
        int rc;

        switch (size) {
        case 1:
                rc = __put_get_user_asm((unsigned char *)x,
                                        (unsigned char __user *)ptr,
                                        size, spec);
                break;
        case 2:
                rc = __put_get_user_asm((unsigned short *)x,
                                        (unsigned short __user *)ptr,
                                        size, spec);
                break;
        case 4:
                rc = __put_get_user_asm((unsigned int *)x,
                                        (unsigned int __user *)ptr,
                                        size, spec);
                break;
        case 8:
                rc = __put_get_user_asm((unsigned long *)x,
                                        (unsigned long __user *)ptr,
                                        size, spec);
                break;
        }
        return rc;
}

#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
        size = __copy_to_user(ptr, x, size);
        return size ? -EFAULT : 0;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
        size = __copy_from_user(x, ptr, size);
        return size ? -EFAULT : 0;
}

#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)                                      \
({                                                              \
        __typeof__(*(ptr)) __x = (x);                           \
        int __pu_err = -EFAULT;                                 \
        __chk_user_ptr(ptr);                                    \
        switch (sizeof (*(ptr))) {                              \
        case 1:                                                 \
        case 2:                                                 \
        case 4:                                                 \
        case 8:                                                 \
                __pu_err = __put_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                break;                                          \
        default:                                                \
                __put_user_bad();                               \
                break;                                          \
        }                                                       \
        __builtin_expect(__pu_err, 0);                          \
})

#define put_user(x, ptr)                                        \
({                                                              \
        might_fault();                                          \
        __put_user(x, ptr);                                     \
})


int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)                                      \
({                                                              \
        int __gu_err = -EFAULT;                                 \
        __chk_user_ptr(ptr);                                    \
        switch (sizeof(*(ptr))) {                               \
        case 1: {                                               \
                unsigned char __x = 0;                          \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 2: {                                               \
                unsigned short __x = 0;                         \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 4: {                                               \
                unsigned int __x = 0;                           \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        case 8: {                                               \
                unsigned long long __x = 0;                     \
                __gu_err = __get_user_fn(&__x, ptr,             \
                                         sizeof(*(ptr)));       \
                (x) = *(__force __typeof__(*(ptr)) *) &__x;     \
                break;                                          \
        };                                                      \
        default:                                                \
                __get_user_bad();                               \
                break;                                          \
        }                                                       \
        __builtin_expect(__gu_err, 0);                          \
})

#define get_user(x, ptr)                                        \
({                                                              \
        might_fault();                                          \
        __get_user(x, ptr);                                     \
})

int __get_user_bad(void) __attribute__((noreturn));

#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user

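/*
 * Usage sketch (illustrative only; variable names are placeholders):
 * get_user()/put_user() transfer a single value and return 0 on success
 * or -EFAULT on a faulting access.
 *
 *	int __user *uptr;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */
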
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_fault();
        return __copy_to_user(to, from, n);
}

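/*
 * Usage sketch (illustrative only): a non-zero return value is the number
 * of bytes that could not be copied, which callers conventionally convert
 * to -EFAULT. ubuf and kdata are placeholders.
 *
 *	if (copy_to_user(ubuf, &kdata, sizeof(kdata)))
 *		return -EFAULT;
 */
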
/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned int sz = __compiletime_object_size(to);

        might_fault();
        if (unlikely(sz != -1 && sz < n)) {
                if (!__builtin_constant_p(n))
                        copy_user_overflow(sz, n);
                else
                        __bad_copy_user();
                return n;
        }
        return __copy_from_user(to, from, n);
}

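/*
 * Usage sketch (illustrative only): on a partial copy the destination is
 * zero-padded up to the requested size and the number of uncopied bytes
 * is returned. kbuf, ubuf and len are placeholders.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */
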
unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_fault();
        return __copy_in_user(to, from, n);
}

/*
 * Copy a null terminated string from userspace.
 */
long __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
        might_fault();
        return __strncpy_from_user(dst, src, count);
}

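/*
 * Usage sketch (illustrative only, assuming the usual strncpy_from_user
 * convention: a negative value on fault, otherwise the length copied).
 * uname is a placeholder for a user-space string pointer.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 */
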
unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
        might_fault();
        return __strnlen_user(src, n);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)

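/*
 * Usage sketch (illustrative only): the returned length includes the
 * terminating NUL and a return value of 0 signals a faulting access.
 * ustr is a placeholder for a user-space string pointer.
 *
 *	unsigned long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (!len)
 *		return -EFAULT;
 */
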
/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
        might_fault();
        return __clear_user(to, n);
}

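/*
 * Usage sketch (illustrative only): like the copy routines, clear_user()
 * returns the number of bytes that could not be zeroed, so a non-zero
 * result is conventionally treated as -EFAULT. ubuf and len are
 * placeholders.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */
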
int copy_to_user_real(void __user *dest, void *src, unsigned long count);
void s390_kernel_write(void *dst, const void *src, size_t size);

#endif /* __S390_UACCESS_H */