/* SPDX-License-Identifier: GPL-2.0 */
/* arch/m68k/include/asm/uaccess_mm.h — m68k (MMU) user-space access helpers */
#ifndef __M68K_UACCESS_H
#define __M68K_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/segment.h>

/* We let the MMU do all checking */
12 static inline int access_ok(int type
, const void __user
*addr
,
/*
 * Not all varients of the 68k family support the notion of address spaces.
 * The traditional 680x0 parts do, and they use the sfc/dfc registers and
 * the "moves" instruction to access user space from kernel space. Other
 * family members like ColdFire don't support this, and only have a single
 * address space, and use the usual "move" instruction for user space access.
 *
 * Outside of this difference the user space access functions are the same.
 * So lets keep the code simple and just define in what we need to use.
 */
#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
#define	MOVES	"moves"
#else
#define	MOVES	"move"
#endif
/*
 * Error stubs referenced for unsupported transfer sizes; presumably left
 * undefined so that invalid __put_user/__get_user sizes fail at link
 * time — TODO confirm against lib/ for this architecture.
 */
extern int __put_user_bad(void);
extern int __get_user_bad(void);
/*
 * Store one 1/2/4-byte value to user space.
 * @res: error accumulator (set to @err on fault, via the .fixup entry)
 * @bwl: operand-size suffix (b/w/l); @reg: constraint letter for @x.
 * The __ex_table entry routes a fault at label 1 to fixup label 10.
 */
#define __put_user_asm(res, x, ptr, bwl, reg, err)	\
asm volatile ("\n"					\
	"1:	"MOVES"."#bwl"	%2,%1\n"		\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.even\n"				\
	"10:	moveq.l	%3,%0\n"			\
	"	jra 2b\n"				\
	"	.previous\n"				\
	"\n"						\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	4\n"				\
	"	.long	1b,10b\n"			\
	"	.long	2b,10b\n"			\
	"	.previous"				\
	: "+d" (res), "=m" (*(ptr))			\
	: #reg (x), "i" (err))
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
/*
 * __put_user() - size-dispatched store to user space.
 * Evaluates to 0 on success or -EFAULT on fault.  The 8-byte case uses a
 * two-instruction pair (%2 low word, %R2 high word) with its own fixup.
 */
#define __put_user(x, ptr)						\
({									\
	typeof(*(ptr)) __pu_val = (x);					\
	int __pu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT);	\
		break;							\
	case 2:								\
		__put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT);	\
		break;							\
	case 4:								\
		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT);	\
		break;							\
	case 8:								\
	    {								\
		const void __user *__pu_ptr = (ptr);			\
		asm volatile ("\n"					\
			"1:	"MOVES".l	%2,(%1)+\n"		\
			"2:	"MOVES".l	%R2,(%1)\n"		\
			"3:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"	.even\n"				\
			"10:	movel %3,%0\n"				\
			"	jra 3b\n"				\
			"	.previous\n"				\
			"\n"						\
			"	.section __ex_table,\"a\"\n"		\
			"	.align 4\n"				\
			"	.long 1b,10b\n"				\
			"	.long 2b,10b\n"				\
			"	.long 3b,10b\n"				\
			"	.previous"				\
			: "+d" (__pu_err), "+a" (__pu_ptr)		\
			: "r" (__pu_val), "i" (-EFAULT)			\
			: "memory");					\
		break;							\
	    }								\
	default:							\
		__pu_err = __put_user_bad();				\
		break;							\
	}								\
	__pu_err;							\
})
#define put_user(x, ptr)	__put_user(x, ptr)
/*
 * Load one 1/2/4-byte value from user space into (x).
 * @type: temporary's C type (u8/u16/u32); the fixup zeroes the temporary
 * (sub.l %1,%1) and sets @res to @err so callers never see stale data.
 */
#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({	\
	type __gu_val;						\
	asm volatile ("\n"					\
		"1:	"MOVES"."#bwl"	%2,%1\n"		\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.even\n"				\
		"10:	move.l	%3,%0\n"			\
		"	sub.l	%1,%1\n"			\
		"	jra	2b\n"				\
		"	.previous\n"				\
		"\n"						\
		"	.section __ex_table,\"a\"\n"		\
		"	.align	4\n"				\
		"	.long	1b,10b\n"			\
		"	.previous"				\
		: "+d" (res), "=&" #reg (__gu_val)		\
		: "m" (*(ptr)), "i" (err));			\
	(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val;	\
})
/*
 * __get_user() - size-dispatched load from user space.
 * Evaluates to 0 on success or -EFAULT on fault.  The 8-byte case is
 * commented out (broken typeof on gcc-4.1); sizes other than 1/2/4 fall
 * through to __get_user_bad().
 */
#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);	\
		break;							\
	case 2:								\
		__get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT);	\
		break;							\
	case 4:								\
		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);	\
		break;							\
/*	case 8: disabled because gcc-4.1 has a broken typeof		\
	    {								\
		const void *__gu_ptr = (ptr);				\
		u64 __gu_val;						\
		asm volatile ("\n"					\
			"1:	"MOVES".l	(%2)+,%1\n"		\
			"2:	"MOVES".l	(%2),%R1\n"		\
			"3:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"	.even\n"				\
			"10:	move.l	%3,%0\n"			\
			"	sub.l	%1,%1\n"			\
			"	sub.l	%R1,%R1\n"			\
			"	jra	3b\n"				\
			"	.previous\n"				\
			"\n"						\
			"	.section __ex_table,\"a\"\n"		\
			"	.align	4\n"				\
			"	.long	1b,10b\n"			\
			"	.long	2b,10b\n"			\
			"	.previous"				\
			: "+d" (__gu_err), "=&r" (__gu_val),		\
			  "=a" (__gu_ptr)				\
			: "i" (-EFAULT)					\
			: "memory");					\
		(x) = (__force typeof(*(ptr)))__gu_val;			\
		break;							\
	    }	*/							\
	default:							\
		__gu_err = __get_user_bad();				\
		break;							\
	}								\
	__gu_err;							\
})
#define get_user(x, ptr) __get_user(x, ptr)
179 unsigned long __generic_copy_from_user(void *to
, const void __user
*from
, unsigned long n
);
180 unsigned long __generic_copy_to_user(void __user
*to
, const void *from
, unsigned long n
);
/* Map a constant chunk byte-count to a move-size suffix for token pasting:
 * 0 -> (empty), 1 -> b, 2 -> w, 4 -> l. */
#define __suffix0
#define __suffix1 b
#define __suffix2 w
#define __suffix4 l

/*
 * Copy up to three fixed-size chunks (@n1/@n2/@n3 bytes, suffixes
 * @s1/@s2/@s3) from user space.  Unused chunks pass an empty suffix and
 * are elided by the assembler's .ifnc conditionals.  On a fault at chunk
 * label 1/2/3 the matching fixup (10/20/30) adds the remaining chunk
 * sizes to @res so it reports bytes-not-copied.
 */
#define	____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
	asm volatile ("\n"						\
		"1:	"MOVES"."#s1"	(%2)+,%3\n"			\
		"	move."#s1"	%3,(%1)+\n"			\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"2:	"MOVES"."#s2"	(%2)+,%3\n"			\
		"	move."#s2"	%3,(%1)+\n"			\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"3:	"MOVES"."#s3"	(%2)+,%3\n"			\
		"	move."#s3"	%3,(%1)+\n"			\
		"	.endif\n"					\
		"	.endif\n"					\
		"4:\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10f\n"				\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"	.long	2b,20f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	3b,30f\n"				\
		"	.endif\n"					\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	addq.l #"#n1",%0\n"				\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"20:	addq.l #"#n2",%0\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"30:	addq.l #"#n3",%0\n"				\
		"	.endif\n"					\
		"	.endif\n"					\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")

/* Extra expansion level so the __suffix##n pastes resolve before
 * stringification in the inner macro. */
#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
	____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3)	\
	___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3,	\
					__suffix##n1, __suffix##n2, __suffix##n3)
231 static __always_inline
unsigned long
232 __constant_copy_from_user(void *to
, const void __user
*from
, unsigned long n
)
234 unsigned long res
= 0, tmp
;
238 __constant_copy_from_user_asm(res
, to
, from
, tmp
, 1, 0, 0);
241 __constant_copy_from_user_asm(res
, to
, from
, tmp
, 2, 0, 0);
244 __constant_copy_from_user_asm(res
, to
, from
, tmp
, 2, 1, 0);
247 __constant_copy_from_user_asm(res
, to
, from
, tmp
, 4, 0, 0);
250 __constant_copy_from_user_asm(res
, to
, from
, tmp
, 4, 1, 0);
253 __constant_copy_from_user_asm(res
, to
, from
, tmp
, 4, 2, 0);
256 __constant_copy_from_user_asm(res
, to
, from
, tmp
, 4, 2, 1);
259 __constant_copy_from_user_asm(res
, to
, from
, tmp
, 4, 4, 0);
262 __constant_copy_from_user_asm(res
, to
, from
, tmp
, 4, 4, 1);
265 __constant_copy_from_user_asm(res
, to
, from
, tmp
, 4, 4, 2);
268 __constant_copy_from_user_asm(res
, to
, from
, tmp
, 4, 4, 4);
271 /* we limit the inlined version to 3 moves */
272 return __generic_copy_from_user(to
, from
, n
);
/*
 * Copy a small constant region (total @n bytes) to user space as two
 * mandatory moves (@s1, @s2) plus an optional third (@s3, elided via
 * .ifnc when the suffix is empty).  Any fault jumps to fixup label 5,
 * which reports the whole region (@n bytes) as not copied.
 */
#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3)	\
	asm volatile ("\n"						\
		"	move."#s1"	(%2)+,%3\n"			\
		"11:	"MOVES"."#s1"	%3,(%1)+\n"			\
		"12:	move."#s2"	(%2)+,%3\n"			\
		"21:	"MOVES"."#s2"	%3,(%1)+\n"			\
		"22:\n"							\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	move."#s3"	(%2)+,%3\n"			\
		"31:	"MOVES"."#s3"	%3,(%1)+\n"			\
		"32:\n"							\
		"	.endif\n"					\
		"4:\n"							\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	11b,5f\n"				\
		"	.long	12b,5f\n"				\
		"	.long	21b,5f\n"				\
		"	.long	22b,5f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	31b,5f\n"				\
		"	.long	32b,5f\n"				\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"5:	moveq.l	#"#n",%0\n"				\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")
312 static __always_inline
unsigned long
313 __constant_copy_to_user(void __user
*to
, const void *from
, unsigned long n
)
315 unsigned long res
= 0, tmp
;
319 __put_user_asm(res
, *(u8
*)from
, (u8 __user
*)to
, b
, d
, 1);
322 __put_user_asm(res
, *(u16
*)from
, (u16 __user
*)to
, w
, r
, 2);
325 __constant_copy_to_user_asm(res
, to
, from
, tmp
, 3, w
, b
,);
328 __put_user_asm(res
, *(u32
*)from
, (u32 __user
*)to
, l
, r
, 4);
331 __constant_copy_to_user_asm(res
, to
, from
, tmp
, 5, l
, b
,);
334 __constant_copy_to_user_asm(res
, to
, from
, tmp
, 6, l
, w
,);
337 __constant_copy_to_user_asm(res
, to
, from
, tmp
, 7, l
, w
, b
);
340 __constant_copy_to_user_asm(res
, to
, from
, tmp
, 8, l
, l
,);
343 __constant_copy_to_user_asm(res
, to
, from
, tmp
, 9, l
, l
, b
);
346 __constant_copy_to_user_asm(res
, to
, from
, tmp
, 10, l
, l
, w
);
349 __constant_copy_to_user_asm(res
, to
, from
, tmp
, 12, l
, l
, l
);
352 /* limit the inlined version to 3 moves */
353 return __generic_copy_to_user(to
, from
, n
);
359 static inline unsigned long
360 raw_copy_from_user(void *to
, const void __user
*from
, unsigned long n
)
362 if (__builtin_constant_p(n
))
363 return __constant_copy_from_user(to
, from
, n
);
364 return __generic_copy_from_user(to
, from
, n
);
367 static inline unsigned long
368 raw_copy_to_user(void __user
*to
, const void *from
, unsigned long n
)
370 if (__builtin_constant_p(n
))
371 return __constant_copy_to_user(to
, from
, n
);
372 return __generic_copy_to_user(to
, from
, n
);
/* Tell the generic uaccess layer that raw_copy_{from,to}_user above are
 * inline and need no extern declarations. */
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

/* Highest valid user address: the whole address space when running with
 * the kernel data segment, otherwise the task's user limit. */
#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : TASK_SIZE)
380 extern long strncpy_from_user(char *dst
, const char __user
*src
, long count
);
381 extern __must_check
long strnlen_user(const char __user
*str
, long n
);
383 unsigned long __clear_user(void __user
*to
, unsigned long n
);
385 #define clear_user __clear_user
387 #endif /* _M68K_UACCESS_H */