/*
 * arch/s390/include/asm/uaccess.h
 * (from mirror_ubuntu-hirsute-kernel.git, via git.proxmox.com; merge of
 * branch 'acpi-config')
 */
/*
 * S390 version
 * Copyright IBM Corp. 1999, 2000
 * Author(s): Hartmut Penner (hp@de.ibm.com),
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/ctl_reg.h>

/* Historic access_ok() "type" arguments; ignored by access_ok() below. */
#define VERIFY_READ 0
#define VERIFY_WRITE 1

22
/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

/* Wrap an address-space selector value in an mm_segment_t. */
#define MAKE_MM_SEG(a) ((mm_segment_t) { (a) })

#define KERNEL_DS MAKE_MM_SEG(0)
#define USER_DS MAKE_MM_SEG(1)

#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.mm_segment)

/*
 * Switch the current task's address-space override: record the new
 * segment in thread state, then load the matching address space control
 * element (user or kernel ASCE from lowcore) into control register 7.
 */
#define set_fs(x) \
({ \
	unsigned long __pto; \
	current->thread.mm_segment = (x); \
	__pto = current->thread.mm_segment.ar4 ? \
		S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
	__ctl_load(__pto, 7, 7); \
})

/* Two segments are equal iff their ar4 selector bits match. */
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
50
/*
 * Range checking is a no-op on s390: user memory is reached through a
 * separate address space (see set_fs() above), so every (addr, size)
 * pair is accepted here.
 */
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
55
/*
 * __access_ok() runs the compile-time user-pointer check
 * (__chk_user_ptr); the range test always succeeds (see __range_ok).
 */
#define __access_ok(addr, size) \
({ \
	__chk_user_ptr(addr); \
	__range_ok((unsigned long)(addr), (size)); \
})

/* The "type" argument (VERIFY_READ/VERIFY_WRITE) is ignored on s390. */
#define access_ok(type, addr, size) __access_ok(addr, size)
63
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

/*
 * Entries store 32-bit offsets relative to the field itself rather than
 * absolute addresses; extable_insn()/extable_fixup() below resolve them.
 */
struct exception_table_entry
{
	int insn, fixup;
};
81
82 static inline unsigned long extable_insn(const struct exception_table_entry *x)
83 {
84 return (unsigned long)&x->insn + x->insn;
85 }
86
87 static inline unsigned long extable_fixup(const struct exception_table_entry *x)
88 {
89 return (unsigned long)&x->fixup + x->fixup;
90 }
91
/* s390 supplies its own extable sort and search (relative entries). */
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

/* Out-of-line fault handling helper; definition lives outside this header. */
int __handle_fault(unsigned long, unsigned long, int);
96
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
					    unsigned long n);

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
					  unsigned long n);

/* The "inatomic" variants map directly onto the regular copy routines. */
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
136
137 static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
138 {
139 size = __copy_to_user(ptr, x, size);
140 return size ? -EFAULT : 0;
141 }
142
143 static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
144 {
145 size = __copy_from_user(x, ptr, size);
146 return size ? -EFAULT : 0;
147 }
148
/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */

/*
 * __put_user - store a simple value to user space without access_ok().
 * The value is first copied into a correctly-typed local so @x is
 * evaluated exactly once. Evaluates to 0 on success or -EFAULT on
 * fault; sizes other than 1/2/4/8 bytes are diverted to
 * __put_user_bad().
 */
#define __put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __x = (x); \
	int __pu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof (*(ptr))) { \
	case 1: \
	case 2: \
	case 4: \
	case 8: \
		__pu_err = __put_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		break; \
	default: \
		__put_user_bad(); \
		break; \
	} \
	__pu_err; \
})
172
/* put_user - like __put_user(), but may sleep (might_fault() annotation). */
#define put_user(x, ptr) \
({ \
	might_fault(); \
	__put_user(x, ptr); \
})


/* Referencing this flags an unsupported access size in __put_user(). */
int __put_user_bad(void) __attribute__((noreturn));
181
/*
 * __get_user - load a simple value from user space without access_ok().
 * Each supported width copies into a correctly-sized unsigned local and
 * then force-casts it to the pointer's type so @x keeps its declared
 * type. Evaluates to 0 on success or -EFAULT on fault; other sizes are
 * diverted to __get_user_bad().
 */
#define __get_user(x, ptr) \
({ \
	int __gu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: { \
		unsigned char __x; \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 2: { \
		unsigned short __x; \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 4: { \
		unsigned int __x; \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 8: { \
		unsigned long long __x; \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	default: \
		__get_user_bad(); \
		break; \
	} \
	__gu_err; \
})
221
/* get_user - like __get_user(), but may sleep (might_fault() annotation). */
#define get_user(x, ptr) \
({ \
	might_fault(); \
	__get_user(x, ptr); \
})

/* Referencing this flags an unsupported access size in __get_user(). */
int __get_user_bad(void) __attribute__((noreturn));

/* The unaligned accessors map directly onto the regular ones. */
#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user
232
/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	/* No compile-time source-size check here, unlike copy_from_user(). */
	return __copy_to_user(to, from, n);
}
252
/*
 * With CONFIG_DEBUG_STRICT_USER_COPY_CHECKS, any reachable call to this
 * produces a compile-time warning; otherwise it is an ordinary function
 * called when copy_from_user()'s buffer-size check fails.
 */
void copy_from_user_overflow(void)
#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
__compiletime_warning("copy_from_user() buffer size is not provably correct")
#endif
;
258
/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	/* -1 (UINT_MAX) means the destination size is not known statically. */
	unsigned int sz = __compiletime_object_size(to);

	might_fault();
	if (unlikely(sz != -1 && sz < n)) {
		/* Destination provably too small: warn and copy nothing. */
		copy_from_user_overflow();
		return n;
	}
	return __copy_from_user(to, from, n);
}
287
/* Copy @n bytes between two user-space buffers. */
unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

/*
 * copy_in_user - user-to-user copy; may sleep.
 * Returns the number of bytes that could not be copied (0 on success).
 */
static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user(to, from, n);
}
297
/*
 * Copy a null terminated string from userspace.
 */

long __strncpy_from_user(char *dst, const char __user *src, long count);

/*
 * strncpy_from_user - copy a NUL-terminated string from user space,
 * copying at most @count bytes. Sleeping wrapper around
 * __strncpy_from_user(); see that routine for the return convention.
 */
static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_fault();
	return __strncpy_from_user(dst, src, count);
}
310
unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

/*
 * strnlen_user - bounded string length in user space; may sleep.
 * See the strlen_user() kernel-doc below for the return convention.
 */
static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
	might_fault();
	return __strnlen_user(src, n);
}
318
/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL) /* effectively unbounded */
334
/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

/*
 * clear_user - zero @n bytes of user memory; may sleep.
 * NOTE(review): return convention presumably mirrors the copy helpers
 * (bytes that could not be cleared, 0 on success) — confirm against
 * __clear_user's definition.
 */
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}
345
/*
 * NOTE(review): the "_real" suffix suggests @src addresses real
 * (physical) storage — confirm against the out-of-line definition.
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count);

#endif /* __S390_UACCESS_H */