/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>

#include <linux/bug.h>
#include <linux/string.h>

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS		((mm_segment_t){1})

#define segment_eq(a, b)	((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
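/*
 * Usage sketch (illustrative, not part of this header): the classic
 * save/override/restore pattern for temporarily widening the address
 * limit so the user-access helpers may act on kernel buffers; old_fs
 * is a hypothetical local:
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	... access a kernel buffer through the helpers below ...
 *	set_fs(old_fs);
 */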

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

#define access_ok(type, uaddr, size)	\
	( (uaddr) == (uaddr) )
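/*
 * Descriptive note (added for clarity): "(uaddr) == (uaddr)" always
 * evaluates to 1, but it still references uaddr, avoiding
 * unused-variable warnings while unconditionally allowing the access;
 * type and size are deliberately ignored.
 */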

#define put_user __put_user
#define get_user __get_user
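/*
 * Usage sketch (illustrative, not part of this header): both macros
 * evaluate to 0 on success and -EFAULT on a faulting access; uptr is a
 * hypothetical int __user * argument:
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */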

#if !defined(CONFIG_64BIT)
#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset
 * to the address of the instruction that is allowed to fault, and the second
 * is the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32bit values are sufficient even on a 64bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};
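/*
 * Decoding sketch (an assumption, mirroring the generic relative-extable
 * scheme): each field stores "target address - address of the field", so
 * the absolute addresses are recovered by adding the field's own address
 * back in, e.g.:
 *
 *	insn_addr  = (unsigned long)&entry->insn  + entry->insn;
 *	fixup_addr = (unsigned long)&entry->fixup + entry->fixup;
 *
 * The fault handler performs the equivalent arithmetic when it walks the
 * table.
 */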

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )	\
	".section __ex_table,\"aw\"\n"				\
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"

/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with lowest bit set) for which the fault handler in fixup_exception() will
 * load -EFAULT into %r8 for a read or write fault, and zeroes the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )	\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
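/*
 * Example expansion (illustrative): with a faulting load at local label 1:
 * and fixup label 9:, ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) emits
 *
 *	.section __ex_table,"aw"
 *	.word (1b - .), (9b + 1 - .)
 *	.previous
 *
 * The "+ 1" sets the lowest bit of the fixup offset, the flag that
 * fixup_exception() tests to apply the -EFAULT handling described above.
 */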

/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
 * is 0), or with the current value of %%sr3 to access user space (USER_DS)
 * memory. The following __get_user_asm() and __put_user_asm() functions have
 * %%sr2 hard-coded to access the requested memory.
 */
#define load_sr2() \
	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )

#define __get_user_internal(val, ptr)			\
({							\
	register long __gu_err __asm__ ("r8") = 0;	\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __get_user_asm(val, "ldb", ptr); break;	\
	case 2: __get_user_asm(val, "ldh", ptr); break;	\
	case 4: __get_user_asm(val, "ldw", ptr); break;	\
	case 8: LDD_USER(val, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__gu_err;					\
})

#define __get_user(val, ptr)				\
({							\
	load_sr2();					\
	__get_user_internal(val, ptr);			\
})

#define __get_user_asm(val, ldx, ptr)			\
{							\
	register long __gu_val;				\
							\
	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
}

#if !defined(CONFIG_64BIT)

#define __get_user_asm64(val, ptr)			\
{							\
	union {						\
		unsigned long long	l;		\
		__typeof__(*(ptr))	t;		\
	} __gu_tmp;					\
							\
	__asm__(" copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr2,%2),%0\n"		\
		"2: ldw 4(%%sr2,%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=&r"(__gu_tmp.l), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = __gu_tmp.t;				\
}
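/*
 * Note (descriptive): on 32-bit parisc a 64-bit value occupies a register
 * pair; %0 names the first (high word, big-endian) register and %R0 the
 * second. The initial "copy %%r0,%R0" pre-zeroes the low word so that,
 * together with the fixup zeroing the faulting load's target register,
 * a fault leaves the whole destination zeroed.
 */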

#endif /* !defined(CONFIG_64BIT) */


#define __put_user_internal(x, ptr)				\
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	switch (sizeof(*(ptr))) {				\
	case 1: __put_user_asm("stb", __x, ptr); break;		\
	case 2: __put_user_asm("sth", __x, ptr); break;		\
	case 4: __put_user_asm("stw", __x, ptr); break;		\
	case 8: STD_USER(__x, ptr); break;			\
	default: BUILD_BUG();					\
	}							\
								\
	__pu_err;						\
})

#define __put_user(x, ptr)					\
({								\
	load_sr2();						\
	__put_user_internal(x, ptr);				\
})


/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * r8 is already listed as err.
 */

#define __put_user_asm(stx, x, ptr)				\
	__asm__ __volatile__ (					\
		"1: " stx " %2,0(%%sr2,%1)\n"			\
		"9:\n"						\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
		: "=r"(__pu_err)				\
		: "r"(ptr), "r"(x), "0"(__pu_err))

#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {			\
	__asm__ __volatile__ (					\
		"1: stw %2,0(%%sr2,%1)\n"			\
		"2: stw %R2,4(%%sr2,%1)\n"			\
		"9:\n"						\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)		\
		: "=r"(__pu_err)				\
		: "r"(ptr), "r"(__val), "0"(__pu_err));		\
} while (0)

#endif /* !defined(CONFIG_64BIT) */


/*
 * Complex access routines -- external declarations
 */

extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define clear_user lclear_user
#define __clear_user lclear_user
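/*
 * Usage sketch (illustrative, not part of this header): clear_user()
 * returns the number of bytes that could not be zeroed, so 0 means
 * success; ubuf and len are hypothetical locals:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */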

unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
					    unsigned long len);
unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
					    unsigned long len);
unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src,
					    unsigned long len);
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
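/*
 * Note (descriptive): the raw_copy_*() routines return the number of
 * bytes left uncopied (0 on success). With INLINE_COPY_TO_USER and
 * INLINE_COPY_FROM_USER defined, linux/uaccess.h instantiates its
 * copy_to_user()/copy_from_user() wrappers inline around them.
 */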

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */