/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * This file contains the exception handler for address error exceptions with
 * the special capability to execute faulting instructions in software.  The
 * handler does not try to handle the case when the program counter points
 * to an address not aligned to a word boundary.
 *
 * Putting data at unaligned addresses is a bad practice even on Intel where
 * only the performance is affected.  Much worse is that such code is non-
 * portable.  Since several programs die on MIPS due to alignment problems,
 * I decided to implement this handler anyway, though I originally didn't
 * intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * I however intend to disable this sometime in the future when the alignment
 * problems with user programs have been fixed; programmers should fix their
 * code instead.
 *
 * Fixing address errors is a per-process option.  The option is inherited
 * across fork(2) and execve(2) calls.  If you really want to use the
 * option in your user programs - I discourage the use of the software
 * emulation strongly - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * The argument x is 0 to disable software emulation, non-zero to enable it.
 *
 * Below is a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * int main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08x\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for (i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 *
 *         return 0;
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *	 exception for the R6000.
 *	 A store crossing a page boundary might be executed only partially.
 *	 Undo the partial store in this case.
 */
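/*
 * Illustration of that partial-store hazard (hypothetical numbers): an
 * emulated 8-byte store at offset 0xffc spans two pages; if the first
 * page is writable but the second one faults, the low bytes have already
 * been written by the time the fault is taken and must be rolled back.
 */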
#include <linux/context_tracking.h>
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
#include <asm/debug.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <linux/uaccess.h>
#define STR(x)	__STR(x)
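/*
 * STR(PTR) stringifies the pointer-sized data directive PTR from
 * <asm/asm.h> (".word" on 32-bit, ".dword" on 64-bit kernels) so the
 * __ex_table entries below are emitted at native pointer size.
 */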
enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};
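/*
 * unaligned_action selects what do_ade() does with an unaligned access:
 * QUIET fixes it up silently, SIGNAL forwards it as a signal instead of
 * fixing it up, SHOW fixes it up but dumps the registers first.
 */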
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
#ifdef __BIG_ENDIAN
#define _LoadHW(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (".set\tnoat\n"        \
			"1:\t"type##_lb("%0", "0(%2)")"\n"  \
			"2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\t.insn\n\t"                     \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous\n\t"                     \
			".set\tat\n\t"                      \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadW(addr, value, res, type)   \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_lwl("%0", "(%2)")"\n"  \
			"2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
			"li\t%1, 0\n"                       \
			"3:\t.insn\n\t"                     \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without lwl instruction */
#define _LoadW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n"                      \
			".set\tnoat\n\t"                    \
			"1:"type##_lb("%0", "0(%2)")"\n\t"  \
			"2:"type##_lbu("$1", "1(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"3:"type##_lbu("$1", "2(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"4:"type##_lbu("$1", "3(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".set\tpop\n\t"                     \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%1, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _LoadHWU(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"type##_lbu("%0", "0(%2)")"\n" \
			"2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".set\tat\n\t"                      \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadWU(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_lwl("%0", "(%2)")"\n"  \
			"2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
			"dsll\t%0, %0, 32\n\t"              \
			"dsrl\t%0, %0, 32\n\t"              \
			"li\t%1, 0\n"                       \
			"3:\t.insn\n\t"                     \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#define _LoadDW(addr, value, res)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\tldl\t%0, (%2)\n"               \
			"2:\tldr\t%0, 7(%2)\n\t"            \
			"li\t%1, 0\n"                       \
			"3:\t.insn\n\t"                     \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:"type##_lbu("%0", "0(%2)")"\n\t" \
			"2:"type##_lbu("$1", "1(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"3:"type##_lbu("$1", "2(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"4:"type##_lbu("$1", "3(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".set\tpop\n\t"                     \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%1, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#define _LoadDW(addr, value, res)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:lb\t%0, 0(%2)\n\t"               \
			"2:lbu\t$1, 1(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"3:lbu\t$1, 2(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"4:lbu\t$1, 3(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"5:lbu\t$1, 4(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"6:lbu\t$1, 5(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"7:lbu\t$1, 6(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"8:lbu\t$1, 7(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".set\tpop\n\t"                     \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%1, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			STR(PTR)"\t5b, 11b\n\t"             \
			STR(PTR)"\t6b, 11b\n\t"             \
			STR(PTR)"\t7b, 11b\n\t"             \
			STR(PTR)"\t8b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _StoreHW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"type##_sb("%1", "1(%2)")"\n"  \
			"srl\t$1, %1, 0x8\n"                \
			"2:\t"type##_sb("$1", "0(%2)")"\n"  \
			".set\tat\n\t"                      \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT));\
} while(0)
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _StoreW(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_swl("%1", "(%2)")"\n"  \
			"2:\t"type##_swr("%1", "3(%2)")"\n\t"\
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)

#define _StoreDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\tsdl\t%1, (%2)\n"               \
			"2:\tsdr\t%1, 7(%2)\n\t"            \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _StoreW(addr, value, res, type)  \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:"type##_sb("%1", "3(%2)")"\n\t"  \
			"srl\t$1, %1, 0x8\n\t"              \
			"2:"type##_sb("$1", "2(%2)")"\n\t"  \
			"srl\t$1, $1, 0x8\n\t"              \
			"3:"type##_sb("$1", "1(%2)")"\n\t"  \
			"srl\t$1, $1, 0x8\n\t"              \
			"4:"type##_sb("$1", "0(%2)")"\n\t"  \
			".set\tpop\n\t"                     \
			"li\t%0, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%0, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT)  \
			: "memory");                        \
} while(0)

#define _StoreDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:sb\t%1, 7(%2)\n\t"               \
			"dsrl\t$1, %1, 0x8\n\t"             \
			"2:sb\t$1, 6(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"3:sb\t$1, 5(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"4:sb\t$1, 4(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"5:sb\t$1, 3(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"6:sb\t$1, 2(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"7:sb\t$1, 1(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"8:sb\t$1, 0(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			".set\tpop\n\t"                     \
			"li\t%0, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%0, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			STR(PTR)"\t5b, 11b\n\t"             \
			STR(PTR)"\t6b, 11b\n\t"             \
			STR(PTR)"\t7b, 11b\n\t"             \
			STR(PTR)"\t8b, 11b\n\t"             \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT)  \
			: "memory");                        \
} while(0)
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */

#else /* __BIG_ENDIAN */
#define _LoadHW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (".set\tnoat\n"        \
			"1:\t"type##_lb("%0", "1(%2)")"\n"  \
			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\t.insn\n\t"                     \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous\n\t"                     \
			".set\tat\n\t"                      \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_lwl("%0", "3(%2)")"\n" \
			"2:\t"type##_lwr("%0", "(%2)")"\n\t"\
			"li\t%1, 0\n"                       \
			"3:\t.insn\n\t"                     \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without lwl instruction */
#define _LoadW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n"                      \
			".set\tnoat\n\t"                    \
			"1:"type##_lb("%0", "3(%2)")"\n\t"  \
			"2:"type##_lbu("$1", "2(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"3:"type##_lbu("$1", "1(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"4:"type##_lbu("$1", "0(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".set\tpop\n\t"                     \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%1, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _LoadHWU(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"type##_lbu("%0", "1(%2)")"\n" \
			"2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".set\tat\n\t"                      \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadWU(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_lwl("%0", "3(%2)")"\n" \
			"2:\t"type##_lwr("%0", "(%2)")"\n\t"\
			"dsll\t%0, %0, 32\n\t"              \
			"dsrl\t%0, %0, 32\n\t"              \
			"li\t%1, 0\n"                       \
			"3:\t.insn\n\t"                     \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#define _LoadDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\tldl\t%0, 7(%2)\n"              \
			"2:\tldr\t%0, (%2)\n\t"             \
			"li\t%1, 0\n"                       \
			"3:\t.insn\n\t"                     \
			"\t.section\t.fixup,\"ax\"\n\t"     \
			"4:\tli\t%1, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:"type##_lbu("%0", "3(%2)")"\n\t" \
			"2:"type##_lbu("$1", "2(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"3:"type##_lbu("$1", "1(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"4:"type##_lbu("$1", "0(%2)")"\n\t" \
			"sll\t%0, 0x8\n\t"                  \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".set\tpop\n\t"                     \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%1, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#define _LoadDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:lb\t%0, 7(%2)\n\t"               \
			"2:lbu\t$1, 6(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"3:lbu\t$1, 5(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"4:lbu\t$1, 4(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"5:lbu\t$1, 3(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"6:lbu\t$1, 2(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"7:lbu\t$1, 1(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"8:lbu\t$1, 0(%2)\n\t"              \
			"dsll\t%0, 0x8\n\t"                 \
			"or\t%0, $1\n\t"                    \
			"li\t%1, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".set\tpop\n\t"                     \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%1, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			STR(PTR)"\t5b, 11b\n\t"             \
			STR(PTR)"\t6b, 11b\n\t"             \
			STR(PTR)"\t7b, 11b\n\t"             \
			STR(PTR)"\t8b, 11b\n\t"             \
			".previous"                         \
			: "=&r" (value), "=r" (res)         \
			: "r" (addr), "i" (-EFAULT));       \
} while(0)

#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _StoreHW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tnoat\n"                      \
			"1:\t"type##_sb("%1", "0(%2)")"\n"  \
			"srl\t$1, %1, 0x8\n"                \
			"2:\t"type##_sb("$1", "1(%2)")"\n"  \
			".set\tat\n\t"                      \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT));\
} while(0)
#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _StoreW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\t"type##_swl("%1", "3(%2)")"\n" \
			"2:\t"type##_swr("%1", "(%2)")"\n\t"\
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)

#define _StoreDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (                      \
			"1:\tsdl\t%1, 7(%2)\n"              \
			"2:\tsdr\t%1, (%2)\n\t"             \
			"li\t%0, 0\n"                       \
			"3:\n\t"                            \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"4:\tli\t%0, %3\n\t"                \
			"j\t3b\n\t"                         \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 4b\n\t"              \
			STR(PTR)"\t2b, 4b\n\t"              \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)
#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without swl and sdl instructions */
#define _StoreW(addr, value, res, type) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:"type##_sb("%1", "0(%2)")"\n\t"  \
			"srl\t$1, %1, 0x8\n\t"              \
			"2:"type##_sb("$1", "1(%2)")"\n\t"  \
			"srl\t$1, $1, 0x8\n\t"              \
			"3:"type##_sb("$1", "2(%2)")"\n\t"  \
			"srl\t$1, $1, 0x8\n\t"              \
			"4:"type##_sb("$1", "3(%2)")"\n\t"  \
			".set\tpop\n\t"                     \
			"li\t%0, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%0, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT)  \
			: "memory");                        \
} while(0)

#define _StoreDW(addr, value, res) \
do {                                                        \
		__asm__ __volatile__ (                      \
			".set\tpush\n\t"                    \
			".set\tnoat\n\t"                    \
			"1:sb\t%1, 0(%2)\n\t"               \
			"dsrl\t$1, %1, 0x8\n\t"             \
			"2:sb\t$1, 1(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"3:sb\t$1, 2(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"4:sb\t$1, 3(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"5:sb\t$1, 4(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"6:sb\t$1, 5(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"7:sb\t$1, 6(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			"8:sb\t$1, 7(%2)\n\t"               \
			"dsrl\t$1, $1, 0x8\n\t"             \
			".set\tpop\n\t"                     \
			"li\t%0, 0\n"                       \
			"10:\n\t"                           \
			".insn\n\t"                         \
			".section\t.fixup,\"ax\"\n\t"       \
			"11:\tli\t%0, %3\n\t"               \
			"j\t10b\n\t"                        \
			".previous\n\t"                     \
			".section\t__ex_table,\"a\"\n\t"    \
			STR(PTR)"\t1b, 11b\n\t"             \
			STR(PTR)"\t2b, 11b\n\t"             \
			STR(PTR)"\t3b, 11b\n\t"             \
			STR(PTR)"\t4b, 11b\n\t"             \
			STR(PTR)"\t5b, 11b\n\t"             \
			STR(PTR)"\t6b, 11b\n\t"             \
			STR(PTR)"\t7b, 11b\n\t"             \
			STR(PTR)"\t8b, 11b\n\t"             \
			".previous"                         \
			: "=r" (res)                        \
			: "r" (value), "r" (addr), "i" (-EFAULT)  \
			: "memory");                        \
} while(0)

#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#endif /* __BIG_ENDIAN */
#define LoadHWU(addr, value, res)	_LoadHWU(addr, value, res, kernel)
#define LoadHWUE(addr, value, res)	_LoadHWU(addr, value, res, user)
#define LoadWU(addr, value, res)	_LoadWU(addr, value, res, kernel)
#define LoadWUE(addr, value, res)	_LoadWU(addr, value, res, user)
#define LoadHW(addr, value, res)	_LoadHW(addr, value, res, kernel)
#define LoadHWE(addr, value, res)	_LoadHW(addr, value, res, user)
#define LoadW(addr, value, res)		_LoadW(addr, value, res, kernel)
#define LoadWE(addr, value, res)	_LoadW(addr, value, res, user)
#define LoadDW(addr, value, res)	_LoadDW(addr, value, res)

#define StoreHW(addr, value, res)	_StoreHW(addr, value, res, kernel)
#define StoreHWE(addr, value, res)	_StoreHW(addr, value, res, user)
#define StoreW(addr, value, res)	_StoreW(addr, value, res, kernel)
#define StoreWE(addr, value, res)	_StoreW(addr, value, res, user)
#define StoreDW(addr, value, res)	_StoreDW(addr, value, res)
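/*
 * Calling convention for the wrappers above: res is 0 on success and
 * -EFAULT if any of the component accesses faulted, so callers do e.g.
 *
 *	LoadW(addr, value, res);
 *	if (res)
 *		goto fault;
 *
 * The kernel/user variants differ only in whether the EVA user-mode
 * load/store forms are emitted.
 */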
static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int __user *pc)
{
	unsigned long origpc, orig31, value;
	union mips_instruction insn;
	unsigned int res;
#ifdef CONFIG_EVA
	mm_segment_t seg;
#endif

	origpc = (unsigned long)pc;
	orig31 = regs->regs[31];

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	/*
	 * This load never faults.
	 */
	__get_user(insn.word, pc);

	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We
	 * can assume therefore that the code is MIPS-aware and
	 * really buggy.  Emulating these instructions would break the
	 * semantics anyway.
	 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

	/*
	 * The remaining opcodes are the ones that are really of
	 * interest.
	 */
	case spec3_op:
		if (insn.dsp_format.func == lx_op) {
			switch (insn.dsp_format.op) {
			case lwx_op:
				if (!access_ok(VERIFY_READ, addr, 4))
					goto sigbus;
				LoadW(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			case lhx_op:
				if (!access_ok(VERIFY_READ, addr, 2))
					goto sigbus;
				LoadHW(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			default:
				goto sigill;
			}
		}
#ifdef CONFIG_EVA
		else {
			/*
			 * we can land here only from kernel accessing user
			 * memory, so we need to "switch" the address limit to
			 * user space, so that address check can work properly.
			 */
			seg = get_fs();
			set_fs(USER_DS);
			switch (insn.spec3_format.func) {
			case lhe_op:
				if (!access_ok(VERIFY_READ, addr, 2)) {
					set_fs(seg);
					goto sigbus;
				}
				LoadHWE(addr, value, res);
				if (res) {
					set_fs(seg);
					goto fault;
				}
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case lwe_op:
				if (!access_ok(VERIFY_READ, addr, 4)) {
					set_fs(seg);
					goto sigbus;
				}
				LoadWE(addr, value, res);
				if (res) {
					set_fs(seg);
					goto fault;
				}
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case lhue_op:
				if (!access_ok(VERIFY_READ, addr, 2)) {
					set_fs(seg);
					goto sigbus;
				}
				LoadHWUE(addr, value, res);
				if (res) {
					set_fs(seg);
					goto fault;
				}
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case she_op:
				if (!access_ok(VERIFY_WRITE, addr, 2)) {
					set_fs(seg);
					goto sigbus;
				}
				compute_return_epc(regs);
				value = regs->regs[insn.spec3_format.rt];
				StoreHWE(addr, value, res);
				if (res) {
					set_fs(seg);
					goto fault;
				}
				break;
			case swe_op:
				if (!access_ok(VERIFY_WRITE, addr, 4)) {
					set_fs(seg);
					goto sigbus;
				}
				compute_return_epc(regs);
				value = regs->regs[insn.spec3_format.rt];
				StoreWE(addr, value, res);
				if (res) {
					set_fs(seg);
					goto fault;
				}
				break;
			default:
				set_fs(seg);
				goto sigill;
			}
			set_fs(seg);
		}
#endif
		break;
	case lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				LoadHW(addr, value, res);
			else
				LoadHWE(addr, value, res);
		} else {
			LoadHW(addr, value, res);
		}

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
	case lw_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				LoadW(addr, value, res);
			else
				LoadWE(addr, value, res);
		} else {
			LoadW(addr, value, res);
		}

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
	case lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				LoadHWU(addr, value, res);
			else
				LoadHWUE(addr, value, res);
		} else {
			LoadHWU(addr, value, res);
		}

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;
	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;
	case sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				StoreHW(addr, value, res);
			else
				StoreHWE(addr, value, res);
		} else {
			StoreHW(addr, value, res);
		}

		if (res)
			goto fault;
		break;
	case sw_op:
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				StoreW(addr, value, res);
			else
				StoreWE(addr, value, res);
		} else {
			StoreW(addr, value, res);
		}

		if (res)
			goto fault;
		break;
	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;
#ifdef CONFIG_MIPS_FP_SUPPORT

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
	case cop1x_op: {
		void __user *fault_addr = NULL;

		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());

		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* Restore FPU state. */

		/* Signal if something went wrong. */
		process_fpemu_return(res, fault_addr, 0);

		if (res == 0)
			break;
		return;
	}
#endif /* CONFIG_MIPS_FP_SUPPORT */
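	/*
	 * Note: unaligned FP accesses are punted wholesale to the FPU
	 * emulator above, which re-executes the instruction and performs
	 * the memory access itself with alignment-safe user copies;
	 * nothing is emulated here directly.
	 */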
#ifdef CONFIG_CPU_HAS_MSA

	case msa_op: {
		unsigned int wd, preempted;
		enum msa_2b_fmt df;
		union fpureg *fpr;

		if (!cpu_has_msa)
			goto sigill;

		/*
		 * If we've reached this point then userland should have taken
		 * the MSA disabled exception & initialised vector context at
		 * some point in the past.
		 */
		BUG_ON(!thread_msa_context_live());

		df = insn.msa_mi10_format.df;
		wd = insn.msa_mi10_format.wd;
		fpr = &current->thread.fpu.fpr[wd];

		switch (insn.msa_mi10_format.func) {
		case msa_ld_op:
			if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
				goto sigbus;

			do {
				/*
				 * If we have live MSA context keep track of
				 * whether we get preempted in order to avoid
				 * the register context we load being clobbered
				 * by the live context as it's saved during
				 * preemption. If we don't have live context
				 * then it can't be saved to clobber the value
				 * we load.
				 */
				preempted = test_thread_flag(TIF_USEDMSA);

				res = __copy_from_user_inatomic(fpr, addr,
								sizeof(*fpr));
				if (res)
					goto fault;

				/*
				 * Update the hardware register if it is in use
				 * by the task in this quantum, in order to
				 * avoid having to save & restore the whole
				 * vector context.
				 */
				preempt_disable();
				if (test_thread_flag(TIF_USEDMSA)) {
					write_msa_wr(wd, fpr, df);
					preempted = 0;
				}
				preempt_enable();
			} while (preempted);
			break;

		case msa_st_op:
			if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr)))
				goto sigbus;

			/*
			 * Update from the hardware register if it is in use by
			 * the task in this quantum, in order to avoid having to
			 * save & restore the whole vector context.
			 */
			preempt_disable();
			if (test_thread_flag(TIF_USEDMSA))
				read_msa_wr(wd, fpr, df);
			preempt_enable();

			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
			if (res)
				goto fault;
			break;

		default:
			goto sigill;
		}

		compute_return_epc(regs);
		break;
	}
#endif /* CONFIG_CPU_HAS_MSA */
#ifndef CONFIG_CPU_MIPSR6
	/*
	 * COP2 is available to implementor for application specific use.
	 * It's up to applications to register a notifier chain and do
	 * whatever they have to do, including possible sending of signals.
	 *
	 * This instruction has been reallocated in Release 6
	 */
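	/*
	 * A sketch of the consumer side (hypothetical names; the notifier
	 * API itself comes from <asm/cop2.h>):
	 *
	 *	static int my_cu2_call(struct notifier_block *nb,
	 *			       unsigned long action, void *data)
	 *	{
	 *		return NOTIFY_OK;
	 *	}
	 *
	 *	static struct notifier_block my_cu2_nb = {
	 *		.notifier_call = my_cu2_call,
	 *	};
	 *
	 *	register_cu2_notifier(&my_cu2_nb);
	 */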
	case lwc2_op:
		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
		break;

	case ldc2_op:
		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
		break;

	case swc2_op:
		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
		break;

	case sdc2_op:
		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
		break;
#endif
	default:
		/*
		 * Pheeee...  We encountered an as yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}
#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;
fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
/* Recode table from 16-bit register notation to 32-bit GPR. */
static const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
static void emulate_load_store_microMIPS(struct pt_regs *regs,
					 void __user *addr)
{
	unsigned long value;
	unsigned int res;
	int i;
	unsigned int reg = 0, rvar;
	unsigned long orig31;
	u16 __user *pc16;
	u16 halfword;
	unsigned int word;
	unsigned long origpc, contpc;
	union mips_instruction insn;
	struct mm_decoded_insn mminsn;

	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];

	mminsn.micro_mips_mode = 1;
	/*
	 * This load never faults.
	 */
	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
	__get_user(halfword, pc16);
	pc16++;
	contpc = regs->cp0_epc + 2;
	word = ((unsigned int)halfword << 16);
	mminsn.pc_inc = 2;

	if (!mm_insn_16bit(halfword)) {
		__get_user(halfword, pc16);
		pc16++;
		contpc = regs->cp0_epc + 4;
		mminsn.pc_inc = 4;
		word |= halfword;
	}
	mminsn.insn = word;

	if (get_user(halfword, pc16))
		goto fault;
	pc16++;
	mminsn.next_pc_inc = 2;
	word = ((unsigned int)halfword << 16);

	if (!mm_insn_16bit(halfword)) {
		pc16++;
		if (get_user(halfword, pc16))
			goto fault;
		mminsn.next_pc_inc = 4;
		word |= halfword;
	}
	mminsn.next_insn = word;

	insn = (union mips_instruction)(mminsn.insn);
	if (mm_isBranchInstr(regs, mminsn, &contpc))
		insn = (union mips_instruction)(mminsn.next_insn);
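	/*
	 * If the faulting instruction sits in a branch delay slot,
	 * mm_isBranchInstr() resolves the branch target into contpc and we
	 * emulate the delay-slot instruction (next_insn) instead.
	 */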
	/* Parse instruction to find what to do */

	switch (insn.mm_i_format.opcode) {
	case mm_pool32a_op:
		switch (insn.mm_x_format.func) {
		case mm_lwxs_op:
			reg = insn.mm_x_format.rd;
			goto loadW;
		}

		goto sigbus;

	case mm_pool32b_op:
		switch (insn.mm_m_format.func) {
		case mm_lwp_func:
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (!access_ok(VERIFY_READ, addr, 8))
				goto sigbus;

			LoadW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg] = value;
			addr += 4;
			LoadW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg + 1] = value;
			goto success;
		case mm_swp_func:
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (!access_ok(VERIFY_WRITE, addr, 8))
				goto sigbus;

			value = regs->regs[reg];
			StoreW(addr, value, res);
			if (res)
				goto fault;
			addr += 4;
			value = regs->regs[reg + 1];
			StoreW(addr, value, res);
			if (res)
				goto fault;
			goto success;
#ifdef CONFIG_64BIT
		case mm_ldp_func:
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (!access_ok(VERIFY_READ, addr, 16))
				goto sigbus;

			LoadDW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg] = value;
			addr += 8;
			LoadDW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[reg + 1] = value;
			goto success;
#endif /* CONFIG_64BIT */
#ifdef CONFIG_64BIT
		case mm_sdp_func:
			reg = insn.mm_m_format.rd;
			if (reg == 31)
				goto sigbus;

			if (!access_ok(VERIFY_WRITE, addr, 16))
				goto sigbus;

			value = regs->regs[reg];
			StoreDW(addr, value, res);
			if (res)
				goto fault;
			addr += 8;
			value = regs->regs[reg + 1];
			StoreDW(addr, value, res);
			if (res)
				goto fault;
			goto success;
#endif /* CONFIG_64BIT */
		case mm_lwm32_func:
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (!access_ok
				    (VERIFY_READ, addr, 4 * (rvar + 1)))
					goto sigbus;
			} else {
				if (!access_ok(VERIFY_READ, addr, 4 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;
			for (i = 16; rvar; rvar--, i++) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
				regs->regs[i] = value;
			}
			if ((reg & 0xf) == 9) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
				regs->regs[30] = value;
			}
			if (reg & 0x10) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				regs->regs[31] = value;
			}
			goto success;
		case mm_swm32_func:
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (!access_ok
				    (VERIFY_WRITE, addr, 4 * (rvar + 1)))
					goto sigbus;
			} else {
				if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;
			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
			}
			if ((reg & 0xf) == 9) {
				value = regs->regs[30];
				StoreW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
			}
			if (reg & 0x10) {
				value = regs->regs[31];
				StoreW(addr, value, res);
				if (res)
					goto fault;
			}
			goto success;
#ifdef CONFIG_64BIT
		case mm_ldm_func:
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (!access_ok
				    (VERIFY_READ, addr, 8 * (rvar + 1)))
					goto sigbus;
			} else {
				if (!access_ok(VERIFY_READ, addr, 8 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;

			for (i = 16; rvar; rvar--, i++) {
				LoadDW(addr, value, res);
				if (res)
					goto fault;
				addr += 8;
				regs->regs[i] = value;
			}
			if ((reg & 0xf) == 9) {
				LoadDW(addr, value, res);
				if (res)
					goto fault;
				addr += 8;
				regs->regs[30] = value;
			}
			if (reg & 0x10) {
				LoadDW(addr, value, res);
				if (res)
					goto fault;
				regs->regs[31] = value;
			}
			goto success;
#endif /* CONFIG_64BIT */
#ifdef CONFIG_64BIT
		case mm_sdm_func:
			reg = insn.mm_m_format.rd;
			rvar = reg & 0xf;
			if ((rvar > 9) || !reg)
				goto sigill;
			if (reg & 0x10) {
				if (!access_ok
				    (VERIFY_WRITE, addr, 8 * (rvar + 1)))
					goto sigbus;
			} else {
				if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
					goto sigbus;
			}
			if (rvar == 9)
				rvar = 8;

			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreDW(addr, value, res);
				if (res)
					goto fault;
				addr += 8;
			}
			if ((reg & 0xf) == 9) {
				value = regs->regs[30];
				StoreDW(addr, value, res);
				if (res)
					goto fault;
				addr += 8;
			}
			if (reg & 0x10) {
				value = regs->regs[31];
				StoreDW(addr, value, res);
				if (res)
					goto fault;
			}
			goto success;
#endif /* CONFIG_64BIT */
		}

		/* LWC2, SWC2, LDC2, SDC2 are not serviced */
		goto sigbus;
	case mm_pool32c_op:
		switch (insn.mm_m_format.func) {
		case mm_lwu_func:
			reg = insn.mm_m_format.rd;
			goto loadWU;
		}

		/* LL,SC,LLD,SCD are not serviced */
		goto sigbus;
#ifdef CONFIG_MIPS_FP_SUPPORT
	case mm_pool32f_op:
		switch (insn.mm_x_format.func) {
		case mm_lwxc1_func:
		case mm_swxc1_func:
		case mm_ldxc1_func:
		case mm_sdxc1_func:
			goto fpu_emul;
		}

		goto sigbus;

	case mm_ldc132_op:
	case mm_sdc132_op:
	case mm_lwc132_op:
	case mm_swc132_op: {
		void __user *fault_addr = NULL;

fpu_emul:
		/* roll back jump/branch */
		regs->cp0_epc = origpc;
		regs->regs[31] = orig31;

		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());
		BUG_ON(!is_fpu_owner());

		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* restore FPU state */

		/* If something went wrong, signal */
		process_fpemu_return(res, fault_addr, 0);

		if (res == 0)
			goto success;
		return;
	}
#endif /* CONFIG_MIPS_FP_SUPPORT */
	case mm_lh32_op:
		reg = insn.mm_i_format.rt;
		goto loadHW;

	case mm_lhu32_op:
		reg = insn.mm_i_format.rt;
		goto loadHWU;

	case mm_lw32_op:
		reg = insn.mm_i_format.rt;
		goto loadW;

	case mm_sh32_op:
		reg = insn.mm_i_format.rt;
		goto storeHW;

	case mm_sw32_op:
		reg = insn.mm_i_format.rt;
		goto storeW;

	case mm_ld32_op:
		reg = insn.mm_i_format.rt;
		goto loadDW;

	case mm_sd32_op:
		reg = insn.mm_i_format.rt;
		goto storeDW;
	case mm_pool16c_op:
		switch (insn.mm16_m_format.func) {
		case mm_lwm16_op:
			reg = insn.mm16_m_format.rlist;
			rvar = reg + 1;
			if (!access_ok(VERIFY_READ, addr, 4 * rvar))
				goto sigbus;

			for (i = 16; rvar; rvar--, i++) {
				LoadW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
				regs->regs[i] = value;
			}
			LoadW(addr, value, res);
			if (res)
				goto fault;
			regs->regs[31] = value;

			goto success;

		case mm_swm16_op:
			reg = insn.mm16_m_format.rlist;
			rvar = reg + 1;
			if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
				goto sigbus;

			for (i = 16; rvar; rvar--, i++) {
				value = regs->regs[i];
				StoreW(addr, value, res);
				if (res)
					goto fault;
				addr += 4;
			}
			value = regs->regs[31];
			StoreW(addr, value, res);
			if (res)
				goto fault;

			goto success;

		default:
			goto sigbus;
		}
	case mm_lhu16_op:
		reg = reg16to32[insn.mm16_rb_format.rt];
		goto loadHWU;

	case mm_lw16_op:
		reg = reg16to32[insn.mm16_rb_format.rt];
		goto loadW;

	case mm_sh16_op:
		reg = reg16to32st[insn.mm16_rb_format.rt];
		goto storeHW;

	case mm_sw16_op:
		reg = reg16to32st[insn.mm16_rb_format.rt];
		goto storeW;

	case mm_lwsp16_op:
		reg = insn.mm16_r5_format.rt;
		goto loadW;

	case mm_swsp16_op:
		reg = insn.mm16_r5_format.rt;
		goto storeW;

	case mm_lwgp16_op:
		reg = reg16to32[insn.mm16_r3_format.rt];
		goto loadW;

	default:
		goto sigill;
	}
loadHW:
	if (!access_ok(VERIFY_READ, addr, 2))
		goto sigbus;

	LoadHW(addr, value, res);
	if (res)
		goto fault;

	regs->regs[reg] = value;
	goto success;

loadHWU:
	if (!access_ok(VERIFY_READ, addr, 2))
		goto sigbus;

	LoadHWU(addr, value, res);
	if (res)
		goto fault;

	regs->regs[reg] = value;
	goto success;

loadW:
	if (!access_ok(VERIFY_READ, addr, 4))
		goto sigbus;

	LoadW(addr, value, res);
	if (res)
		goto fault;

	regs->regs[reg] = value;
	goto success;
loadWU:
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (!access_ok(VERIFY_READ, addr, 4))
		goto sigbus;

	LoadWU(addr, value, res);
	if (res)
		goto fault;

	regs->regs[reg] = value;
	goto success;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;
loadDW:
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (!access_ok(VERIFY_READ, addr, 8))
		goto sigbus;

	LoadDW(addr, value, res);
	if (res)
		goto fault;

	regs->regs[reg] = value;
	goto success;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;
storeHW:
	if (!access_ok(VERIFY_WRITE, addr, 2))
		goto sigbus;

	value = regs->regs[reg];
	StoreHW(addr, value, res);
	if (res)
		goto fault;

	goto success;

storeW:
	if (!access_ok(VERIFY_WRITE, addr, 4))
		goto sigbus;

	value = regs->regs[reg];
	StoreW(addr, value, res);
	if (res)
		goto fault;

	goto success;
storeDW:
#ifdef CONFIG_64BIT
	/*
	 * A 32-bit kernel might be running on a 64-bit processor.  But
	 * if we're on a 32-bit processor and an i-cache incoherency
	 * or race makes us see a 64-bit instruction here the sdl/sdr
	 * would blow up, so for now we don't handle unaligned 64-bit
	 * instructions on 32-bit kernels.
	 */
	if (!access_ok(VERIFY_WRITE, addr, 8))
		goto sigbus;

	value = regs->regs[reg];
	StoreDW(addr, value, res);
	if (res)
		goto fault;

	goto success;
#endif /* CONFIG_64BIT */

	/* Cannot handle 64-bit instructions in 32-bit kernel */
	goto sigill;
success:
	regs->cp0_epc = contpc;	/* advance or branch */

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif
	return;
fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user *addr)
{
	unsigned long value;
	unsigned int res;
	int reg;
	unsigned long orig31;
	u16 __user *pc16;
	unsigned long origpc;
	union mips16e_instruction mips16inst, oldinst;
	unsigned int opcode;
	int extended = 0;

	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];
	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
	/*
	 * This load never faults.
	 */
	__get_user(mips16inst.full, pc16);
	oldinst = mips16inst;

	/* skip EXTEND instruction */
	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
		extended = 1;
		pc16++;
		__get_user(mips16inst.full, pc16);
	} else if (delay_slot(regs)) {
		/* skip jump instructions */
		/* JAL/JALX are 32 bits but have OPCODE in first short int */
		if (mips16inst.ri.opcode == MIPS16e_jal_op)
			pc16++;
		pc16++;
		if (get_user(mips16inst.full, pc16))
			goto sigbus;
	}
	opcode = mips16inst.ri.opcode;
	switch (opcode) {
	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
		case MIPS16e_ldpc_func:
		case MIPS16e_ldsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto loadDW;

		case MIPS16e_sdsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto writeDW;

		case MIPS16e_sdrasp_func:
			reg = 29;	/* GPRSP */
			goto writeDW;

		default:
			goto sigbus;
		}
	case MIPS16e_swsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* SWSP */
			case 1:		/* SWGP */
				break;
			case 2:		/* SHGP */
				opcode = MIPS16e_sh_op;
				break;
			default:
				goto sigbus;
			}
		break;
	case MIPS16e_lwpc_op:
		reg = reg16to32[mips16inst.ri.rx];
		break;

	case MIPS16e_lwsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* LWSP */
			case 1:		/* LWGP */
				break;
			case 2:		/* LHGP */
				opcode = MIPS16e_lh_op;
				break;
			case 4:		/* LHUGP */
				opcode = MIPS16e_lhu_op;
				break;
			default:
				goto sigbus;
			}
		break;
	case MIPS16e_i8_op:
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = reg16to32[mips16inst.rri.ry];
		break;
	}
	switch (opcode) {

	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		goto sigbus;

	case MIPS16e_lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;

		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
	case MIPS16e_lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;

		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;

		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;

		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;
	case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;

		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;
	case MIPS16e_sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;
	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;
	case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;
	default:
		/*
		 * Pheeee...  We encountered an as yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;
fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
asmlinkage void do_ade(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *pc;
	mm_segment_t seg;

	prev_state = exception_enter();
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
			1, regs, regs->cp0_badvaddr);
	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->cp0_badvaddr == regs->cp0_epc)
		goto sigbus;

	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all rather ugly ...
	 */
	/*
	 * Are we running in microMIPS mode?
	 */
	if (get_isa16_mode(regs->cp0_epc)) {
		/*
		 * Did we catch a fault trying to load an instruction in
		 * 16-bit mode?
		 */
		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
			goto sigbus;
		if (unaligned_action == UNALIGNED_ACTION_SHOW)
			show_registers(regs);

		if (cpu_has_mmips) {
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_microMIPS(regs,
				(void __user *)regs->cp0_badvaddr);
			set_fs(seg);

			return;
		}
		if (cpu_has_mips16) {
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_MIPS16e(regs,
				(void __user *)regs->cp0_badvaddr);
			set_fs(seg);

			return;
		}

		goto sigbus;
	}
	if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);
	pc = (unsigned int __user *)exception_epc(regs);

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
	set_fs(seg);

	return;
2369 die_if_kernel("Kernel unaligned instruction access", regs
);
2370 force_sig(SIGBUS
, current
);
2373 * XXX On return from the signal handler we should advance the epc
2375 exception_exit(prev_state
);
#ifdef CONFIG_DEBUG_FS
static int __init debugfs_unaligned(void)
{
	struct dentry *d;

	if (!mips_debugfs_dir)
		return -ENODEV;
	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
			       mips_debugfs_dir, &unaligned_instructions);
	if (!d)
		return -ENOMEM;
	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
			       mips_debugfs_dir, &unaligned_action);
	if (!d)
		return -ENOMEM;
	return 0;
}
arch_initcall(debugfs_unaligned);
#endif
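/*
 * With CONFIG_DEBUG_FS these counters are reachable from userspace,
 * e.g. (sketch, assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	cat  /sys/kernel/debug/mips/unaligned_instructions
 *	echo 2 > /sys/kernel/debug/mips/unaligned_action
 *
 * where 0 = quiet fixup, 1 = send a signal, 2 = fixup + dump registers,
 * matching the UNALIGNED_ACTION_* enum above.
 */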