]>
Commit | Line | Data |
---|---|---|
0aea86a2 CM |
1 | /* |
2 | * Based on arch/arm/include/asm/uaccess.h | |
3 | * | |
4 | * Copyright (C) 2012 ARM Ltd. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, | |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | * GNU General Public License for more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License | |
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
17 | */ | |
18 | #ifndef __ASM_UACCESS_H | |
19 | #define __ASM_UACCESS_H | |
20 | ||
bd38967d | 21 | #include <asm/alternative.h> |
4b65a5db | 22 | #include <asm/kernel-pgtable.h> |
bd38967d CM |
23 | #include <asm/sysreg.h> |
24 | ||
0aea86a2 CM |
25 | /* |
26 | * User space memory access functions | |
27 | */ | |
87261d19 | 28 | #include <linux/bitops.h> |
bffe1baf | 29 | #include <linux/kasan-checks.h> |
0aea86a2 | 30 | #include <linux/string.h> |
0aea86a2 | 31 | |
338d4f49 | 32 | #include <asm/cpufeature.h> |
0aea86a2 | 33 | #include <asm/ptrace.h> |
0aea86a2 CM |
34 | #include <asm/memory.h> |
35 | #include <asm/compiler.h> | |
46583939 | 36 | #include <asm/extable.h> |
0aea86a2 | 37 | |
/* Legacy "segment" accessors: the kernel segment and the current limit. */
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)

/*
 * Set the current task's user-space address limit.
 *
 * Besides storing the new limit, this must (a) prevent a mispredicted
 * call from leaking the old limit to access_ok() under speculation and
 * (b) toggle PSTATE.UAO where the CPU has it, so the unprivileged
 * load/store instructions used by the uaccess routines can reach
 * kernel memory when the limit is KERNEL_DS.
 */
static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	dsb(nsh);
	isb();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}

/* mm_segment_t is a plain scalar here, so direct comparison suffices. */
#define segment_eq(a, b)	((a) == (b))
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 *
 * The check is done entirely in branch-free asm; the 65-bit arithmetic
 * below avoids losing the carry out of the 64-bit add.
 */
static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
{
	unsigned long limit = current_thread_info()->addr_limit;

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %0, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");

	/* 'addr' has been overwritten with the 0/1 result of the check. */
	return addr;
}
/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 *
 * sign_extend64(addr, 55) copies bit 55 into bits 63:56, replacing the
 * tag byte with a canonical extension of the rest of the address.
 */
#define untagged_addr(addr)		sign_extend64(addr, 55)

/* Range check only; does not mask or dereference the pointer. */
#define access_ok(type, addr, size)	__range_ok((unsigned long)(addr), size)
#define user_addr_max			get_fs
/*
 * Emit an exception-table entry pairing a faulting instruction (@from)
 * with its fixup label (@to).  Entries are stored as 32-bit PC-relative
 * offsets ("- .") in the __ex_table section.
 */
#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"
/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
 * Software PAN: block kernel accesses to user memory by pointing
 * TTBR0_EL1 at the reserved page table (reserved_ttbr0, placed right
 * after swapper_pg_dir) and installing the reserved ASID in TTBR1_EL1.
 * Interrupts are masked so the two system-register updates cannot be
 * torn by a context switch.
 */
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed at the end of swapper_pg_dir */
	write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

/*
 * Re-install the task's saved user page table (and its ASID) after a
 * __uaccess_ttbr0_disable() window.
 */
static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;	/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

/* Returns true iff TTBR0 PAN is in use and has now been engaged. */
static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

/* Returns true iff TTBR0 PAN is in use and user access is re-enabled. */
static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
/*
 * !CONFIG_ARM64_SW_TTBR0_PAN: always report "not handled" so the
 * callers in __uaccess_{en,dis}able() fall back to PSTATE.PAN.
 */
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif
/*
 * If software TTBR0 PAN did not handle the transition, toggle the
 * hardware PAN bit instead; the MSR is patched in at boot via the
 * 'alt' alternatives capability.
 */
#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

/* Block kernel access to user memory (PAN set / TTBR0 parked). */
static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

/* Allow kernel access to user memory again. */
static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 * (The uaccess routines then use unprivileged instructions, so PAN can
 * stay set across the access.)
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}
/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	/*
	 * Branch-free: BICS sets Z iff (ptr & ~limit) == 0, i.e. ptr has
	 * no bits set above the limit (this relies on addr_limit being of
	 * the form 2^n - 1); CSEL then yields ptr or NULL.  The csdb()
	 * barrier stops a speculating CPU from using the unmasked value.
	 */
	asm volatile(
	"	bics	xzr, %1, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */

/*
 * Single user load with exception-table fixup.  When 'feature' is
 * present the unprivileged alt_instr encoding (ldtr*) is patched in.
 * On a fault the fixup sets err = -EFAULT and zeroes the destination
 * so stale kernel data is never handed back.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

/*
 * Dispatch on the access size (1/2/4/8 bytes only; anything else is a
 * build error) with user access enabled around the load.  The
 * *_not_uao helpers are no-ops when the CPU has UAO.
 */
#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

/* Unchecked get: returns 0 on success, -EFAULT on fault. */
#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})

/* Like __get_user() but accumulates the error into 'err'. */
#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), (err));				\
	(void)0;							\
})

/*
 * Checked get: verifies the range with access_ok() first, masking the
 * pointer via uaccess_mask_ptr() so it cannot be dereferenced under
 * speculation when out of range.  On failure (x) is zeroed and
 * -EFAULT returned.
 */
#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
		__p = uaccess_mask_ptr(__p), __get_user((x), __p) :	\
		((x) = 0, -EFAULT);					\
})
/*
 * Single user store with exception-table fixup; mirror of
 * __get_user_asm().  The unprivileged alt_instr encoding (sttr*) is
 * patched in when 'feature' is present.  On a fault the fixup sets
 * err = -EFAULT (there is no destination register to zero).
 */
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr " " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

/*
 * Size-dispatched user store (1/2/4/8 bytes only), with user access
 * enabled around it; see __get_user_err() for the UAO interaction.
 */
#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)

/* Unchecked put: returns 0 on success, -EFAULT on fault. */
#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

/* Like __put_user() but accumulates the error into 'err'. */
#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

/*
 * Checked put: range-checks with access_ok() and masks the pointer
 * against speculative out-of-range stores; returns -EFAULT on a bad
 * address without touching user memory.
 */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
		__p = uaccess_mask_ptr(__p), __put_user((x), __p) :	\
		-EFAULT;						\
})
/*
 * Assembly bulk-copy backends wired up as the raw_copy_* primitives
 * used by the generic <linux/uaccess.h> wrappers.  Each presumably
 * returns the number of bytes NOT copied (0 on success), per the
 * raw_copy_* contract — confirm against the .S implementations.
 */
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user __arch_copy_from_user
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user __arch_copy_to_user
extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
/* Ask the generic header to inline the copy_{to,from}_user wrappers. */
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
391 | |
392 | static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) | |
393 | { | |
394 | if (access_ok(VERIFY_WRITE, to, n)) | |
2e25780a | 395 | n = __clear_user(__uaccess_mask_ptr(to), n); |
0aea86a2 CM |
396 | return n; |
397 | } | |
/* Copy a NUL-terminated string from user space (bounded by 'count'). */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

/* Length of a user-space string, bounded by 'n'. */
extern __must_check long strnlen_user(const char __user *str, long n);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

/*
 * Copy from user space, flushing the destination from the CPU caches
 * (presumably for persistent-memory destinations — see the flushcache
 * naming).  NOTE(review): the KASAN write check is done manually here,
 * presumably because __copy_user_flushcache() itself is not
 * instrumented — confirm against its implementation.
 */
static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
#endif
0aea86a2 | 415 | #endif /* __ASM_UACCESS_H */ |